OVC cleanup. (#18649)
* WIP: parameters cleanup
* Removed debug output, fixed CLI
* Fixed python objects conversion
* Finally renamed mmap to share_weights
* Fixed TF conversion from a file or a directory
* Fixed obvious errors in unit tests
* Deleted layouts from OVC. Fixed most of the fails in ovc unit tests (there are still failures)
* Cleanup of other references to layouts; fixed --version
* Fixed the case when two model files are passed in the TF case
* Fixed passing of multiple model parts on the ovc command line
* Tests fixed; support for unnamed input in the CLI parser
* Removed convert_model from runtime
* Changed silent to verbose
* Removed the transform param
* Removed example_input and share_weights from the ovc CLI tool
* Reverted a wrong change
* Test fix
* Code corrections
* Returned comment
* Workaround to fix process hanging after extension loading
* Removed unneeded code
* Added comment

---------

Co-authored-by: Sergey Lyalin <sergey.lyalin@intel.com>
parent 1ce744a00f
commit 8d5a0b1d53
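Taken together, the diff below splits conversion into two entry points: the legacy API stays under openvino.tools.mo, while the new, slimmed-down API lives in openvino.tools.ovc. A minimal sketch of the intended usage after this change (the model path is hypothetical; parameter names come from the diff):

    from openvino.tools.ovc import convert_model
    from openvino.runtime import serialize, compile_model

    ov_model = convert_model("model.pb", verbose=True, share_weights=True)  # new minimal API
    serialize(ov_model, "model.xml")   # write the IR, as the layer tests below do
    compiled = compile_model(ov_model)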
@@ -68,13 +68,6 @@ from openvino.runtime.ie_api import tensor_from_file
 from openvino.runtime.ie_api import compile_model
 
-# Model Conversion API
-try:
-    from openvino.tools.ovc import convert_model, InputCutInfo, LayoutMap
-except ImportError:
-    pass
-
-
 # Extend Node class to support binary operators
 Node.__add__ = opset12.add
 Node.__sub__ = opset12.subtract
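The hunk above drops the convert_model re-export (and its companion classes) from openvino.runtime, so callers have to switch import paths. A sketch of the migration:

    # Before this PR (no longer available):
    #     from openvino.runtime import convert_model, InputCutInfo
    # After:
    from openvino.tools.ovc import convert_model, InputCutInfo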
@@ -3,7 +3,8 @@
 
 from pathlib import Path
 
-from openvino.runtime import serialize, convert_model
+from openvino.runtime import serialize
+from openvino.tools.ovc import convert_model
 from openvino.tools.mo import convert_model as legacy_convert_model
 from openvino.test_utils import compare_functions
@@ -16,7 +17,10 @@ class CommonMOConvertTest:
         output_dir = kwargs['output_dir']
         model_name = kwargs['model_name']
         del kwargs['output_dir']
-        if 'use_legacy_frontend' in kwargs:
+        del kwargs['model_name']
+        if 'use_legacy_frontend' in kwargs or 'use_convert_model_from_mo' in kwargs:
+            if 'use_convert_model_from_mo' in kwargs:
+                del kwargs['use_convert_model_from_mo']
             model = legacy_convert_model(**kwargs)
         else:
             model = convert_model(**kwargs)
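The new use_convert_model_from_mo key lets one parametrized suite route a case to either API. A sketch of two hypothetical case dicts as the dispatch above would consume them:

    case_via_mo = {'input_model': 'model.pb', 'mean_values': [0.5, 1.3, 0.67],
                   'use_convert_model_from_mo': True}   # routed to legacy_convert_model
    case_via_ovc = {'input_model': 'model.pb', 'input': [1, 100, 100, 3]}  # routed to convert_model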
@@ -7,14 +7,13 @@ import shutil
 import subprocess
 import sys
 from pathlib import Path
 
 import numpy as np
-from openvino.tools.mo import mo
-
 logger = logging.getLogger(__name__)
 
 
 def generate_ir(coverage=False, **kwargs):
+    from openvino.tools.mo import mo
     mo_path = Path(mo.__file__).parent
     mo_runner = mo_path.joinpath('main.py').as_posix()
     if coverage:
@@ -48,15 +47,13 @@ def generate_ir(coverage=False, **kwargs):
 
 
 def generate_ir_python_api(coverage=False, **kwargs):
-    from openvino.runtime import convert_model, serialize
-    from openvino.tools.mo import convert_model as legacy_convert_model
-
-    if "use_legacy_frontend" in kwargs and kwargs['use_legacy_frontend']:
-        ov_model = legacy_convert_model(**kwargs)
-    else:
-        ov_model = convert_model(**kwargs)
+    from openvino.runtime import serialize
+    from openvino.tools.mo import convert_model
 
     out_dir = kwargs['output_dir'] + os.sep + kwargs['model_name'] + ".xml"
+
+    # TODO: Remove usage of legacy params from layer tests and switch to convert_model from tools.ovc
+    ov_model = convert_model(**kwargs)
     serialize(ov_model, out_dir)
 
     return 0, ""
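Per the TODO above, the helper keeps routing through the legacy tools.mo API until the layer tests migrate. A sketch of the two call shapes involved (model path hypothetical):

    from openvino.tools.mo import convert_model as mo_convert    # what the helper calls today
    from openvino.tools.ovc import convert_model as ovc_convert  # the migration target

    ov_model = mo_convert(input_model="model.pb", input_shape="[1,3,227,227]")  # legacy params
    ov_model = ovc_convert("model.pb", input=[1, 3, 227, 227])                  # ovc equivalent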
@@ -1,7 +1,7 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from openvino.runtime import convert_model
+from openvino.tools.mo import convert_model
 
 if __name__ == "__main__":
     convert_model(help=True)
@@ -4,7 +4,9 @@
 import numpy as np
 import os
 import pytest
-from openvino.runtime import Model, Layout, PartialShape, Shape, layout_helpers, Type, Dimension, InputCutInfo, LayoutMap
+from openvino.runtime import Model, Layout, PartialShape, Shape, layout_helpers, Type, Dimension
+from openvino.tools.ovc import InputCutInfo
+from openvino.tools.mo import LayoutMap
 
 from common.mo_convert_test_class import CommonMOConvertTest
 from common.tf_layer_test_class import save_to_pb
@@ -132,11 +134,11 @@ class TestComplexParams(CommonMOConvertTest):
         {'params_test': {'input_shape': [PartialShape([2, 3, 4]),
                                          [2, 3, 4],
                                          [Dimension(2), Dimension(3), Dimension(4)]],
-                         'input':['Input1', 'Input2', 'Relu3']},
+                         'input':['Input1', 'Input2', 'Relu3'], 'use_convert_model_from_mo': True},
          'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1,Input2,Relu3'}},
         {'params_test': {'input_shape': [PartialShape([Dimension(), Dimension(1, 3), Dimension(4, -1), Dimension(-1, 5)]),
                                          [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)],
-                                         [Dimension(), 3, Dimension(4, -1), Dimension(-1, 5)]],
+                                         [Dimension(), 3, Dimension(4, -1), Dimension(-1, 5)]], 'use_convert_model_from_mo': True,
                          'input':['Input1', 'Input2', 'Relu3']},
          'params_ref': {'input_shape': "[?,1..3,4..,..5],[?,1..3,4,..5],[?,3,4..,..5]", 'input': 'Input1,Input2,Relu3'}},
         {'params_test': {'input': [InputCutInfo("Relu1", Shape([3, 2]), Type(np.int32)),
@@ -149,26 +151,28 @@ class TestComplexParams(CommonMOConvertTest):
          'params_ref': {'input': "Relu1[3 2]{i32},Relu2[3..10 2..]{i32},Relu3[3 2]{i32}"}},
         {'params_test': {'output': ["Sigmoid_0", "Sigmoid_2"]},
          'params_ref': {'output': "Sigmoid_0,Sigmoid_2"}},
-        {'params_test': {'mean_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]}},
+        {'params_test': {'mean_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]},
+                         'use_convert_model_from_mo': True},
          'params_ref': {'mean_values': "Input1[0.5,1.3,0.67],Input2[4.2,6.7,3.15],Input3[0.757,4.6,7.3]"}},
         {'params_test': {
-            'mean_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]]},
+            'mean_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]], 'use_convert_model_from_mo': True},
          'params_ref': {'mean_values': "[0.5,1.3,0.67],[4.2,6.7,3.15],[0.757,4.6,7.3]"}},
-        {'params_test': {'scale_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]}},
+        {'params_test': {'scale_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]},
+                         'use_convert_model_from_mo': True},
          'params_ref': {'scale_values': "Input1[0.5,1.3,0.67],Input2[4.2,6.7,3.15],Input3[0.757,4.6,7.3]"}},
         {'params_test': {
-            'scale_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]]},
+            'scale_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]], 'use_convert_model_from_mo': True},
          'params_ref': {'scale_values': "[0.5,1.3,0.67],[4.2,6.7,3.15],[0.757,4.6,7.3]"}},
         {'params_test': {
-            'source_layout': {'Input1': Layout("nchw"), 'Input2': "nchw", 'Input3': "nc??"}},
+            'source_layout': {'Input1': Layout("nchw"), 'Input2': "nchw", 'Input3': "nc??"}, 'use_convert_model_from_mo': True},
          'params_ref': {'source_layout': "Input1(nchw),Input2(nchw),Input3(nc??)"}},
         {'params_test': {
-            'target_layout': {'Input1': Layout("nhwc"), 'Input2': "nhwc", 'Input3': "n??c"}},
+            'target_layout': {'Input1': Layout("nhwc"), 'Input2': "nhwc", 'Input3': "n??c"}, 'use_convert_model_from_mo': True},
          'params_ref': {'target_layout': "Input1(nhwc),Input2(nhwc),Input3(n??c)"}},
         {'params_test': {
             'layout': {'Input1': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc"),
                        'Input2': LayoutMap(source_layout="nc??", target_layout=Layout("n??c")),
-                       'Input3': LayoutMap(source_layout="abcd", target_layout="acdb")}},
+                       'Input3': LayoutMap(source_layout="abcd", target_layout="acdb")}, 'use_convert_model_from_mo': True},
          'params_ref': {'layout': "Input1(nchw->nhwc),Input2(nc??->n??c),Input3(abcd->acdb)"}},
         {'params_test': {'input': [PartialShape([2, 3, 4]), [2, 3, 4], [Dimension(2), Dimension(3), Dimension(4)]]},
          'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1,Input2,Input3'}},
@@ -222,13 +226,14 @@ class TestComplexParams(CommonMOConvertTest):
         test_params = params['params_test']
         ref_params = params['params_ref']
         test_params.update({'input_model': tf_net_path})
+        test_params.update({'use_convert_model_from_mo': True})
         ref_params.update({'input_model': tf_net_path})
         self._test(temp_dir, test_params, ref_params)
 
     test_data = [
-        {'params_test': {'input_shape': PartialShape([2, 3, 4])},
+        {'params_test': {'input_shape': PartialShape([2, 3, 4]), 'use_convert_model_from_mo': True},
         'params_ref': {'input_shape': "[2,3,4]"}},
-        {'params_test': {'input_shape': [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)]},
+        {'params_test': {'input_shape': [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)], 'use_convert_model_from_mo': True},
         'params_ref': {'input_shape': "[?,1..3,4,..5]"}},
         {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6])},
         'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]"}},
@@ -240,17 +245,17 @@ class TestComplexParams(CommonMOConvertTest):
         'params_ref': {'input': "Relu[3 2]"}},
         {'params_test': {'input': ("Relu")},
         'params_ref': {'input': "Relu"}},
-        {'params_test': {'mean_values': [0.5, 1.3, 0.67]},
+        {'params_test': {'mean_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True},
         'params_ref': {'mean_values': "[0.5,1.3,0.67]"}},
-        {'params_test': {'scale_values': [0.5, 1.3, 0.67]},
+        {'params_test': {'scale_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True},
         'params_ref': {'scale_values': "[0.5,1.3,0.67]"}},
-        {'params_test': {'source_layout': Layout("nchw")},
+        {'params_test': {'source_layout': Layout("nchw"), 'use_convert_model_from_mo': True},
         'params_ref': {'source_layout': "nchw"}},
-        {'params_test': {'target_layout': Layout("nchw")},
+        {'params_test': {'target_layout': Layout("nchw"), 'use_convert_model_from_mo': True},
         'params_ref': {'target_layout': "nchw"}},
-        {'params_test': {'layout': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc")},
+        {'params_test': {'layout': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc"), 'use_convert_model_from_mo': True},
         'params_ref': {'layout': "nchw->nhwc"}},
-        {'params_test': {'layout': Layout("nchw")},
+        {'params_test': {'layout': Layout("nchw"), 'use_convert_model_from_mo': True},
         'params_ref': {'layout': "nchw"}},
         {'params_test': {'input': [3, 2]},
         'params_ref': {'input': "Input[3 2]"}},
@@ -266,13 +271,13 @@ class TestComplexParams(CommonMOConvertTest):
         'params_ref': {'input': "Input[1]{i32}->[10]"}},
         {'params_test': {'input': (np.int32, [1, 2, 3])},
         'params_ref': {'input': "Input[1,2,3]{i32}"}},
-        {'params_test': {'input_shape': [Dimension(3, 10), 10, -1]},
+        {'params_test': {'input_shape': [Dimension(3, 10), 10, -1], 'use_convert_model_from_mo': True},
         'params_ref': {'input_shape': '[3..10,10,?]'}},
         {'params_test': {'input': [Dimension(3, 10), 10, -1]},
         'params_ref': {'input': 'Input[3..10,10,?]'}},
-        {'params_test': {'input': PartialShape([1, 100, 100, 3]), 'mean_values': [0.5, 1.3, 0.67]},
+        {'params_test': {'input': PartialShape([1, 100, 100, 3]), 'mean_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True},
         'params_ref': {'input': "Input[1,100,100,3]", 'mean_values': "[0.5,1.3,0.67]"}},
-        {'params_test': {'input': [1, 100, 100, 3], 'scale_values': [0.5, 1.3, 0.67]},
+        {'params_test': {'input': [1, 100, 100, 3], 'scale_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True},
         'params_ref': {'input': "Input[1,100,100,3]", 'scale_values': "[0.5,1.3,0.67]"}},
     ]
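Each case above pairs a Python-object form (params_test) with the equivalent CLI-string form (params_ref), and the harness checks that both produce the same IR. For instance, the Dimension case corresponds roughly to:

    from openvino.runtime import Dimension
    from openvino.tools.mo import convert_model

    # tf_net_path is supplied by the test fixture
    m_obj = convert_model(input_model=tf_net_path, input_shape=[Dimension(3, 10), 10, -1])
    m_str = convert_model(input_model=tf_net_path, input_shape='[3..10,10,?]')
    # the harness serializes both models and compares the IRs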
@@ -289,24 +294,6 @@ class TestComplexParams(CommonMOConvertTest):
         ref_params.update({'input_model': tf_net_path})
         self._test(temp_dir, test_params, ref_params)
 
-    test_data = [
-        {
-            'params_test': {'transform': ('MakeStateful', {'param_res_names': {'Input:0': 'Identity:0'}})},
-            'params_ref': {'transform': "MakeStateful[param_res_names={\'Input:0\':\'Identity:0\'}]"}}
-    ]
-
-    @pytest.mark.parametrize("params", test_data)
-    @pytest.mark.nightly
-    def test_mo_convert_transform(self, params, ie_device, precision, ir_version,
-                                  temp_dir, use_new_frontend, use_old_api):
-        tf_net_path = self.create_tf_param_res_model(temp_dir)
-
-        test_params = params['params_test']
-        ref_params = params['params_ref']
-        test_params.update({'input_model': tf_net_path})
-        ref_params.update({'input_model': tf_net_path})
-        self._test(temp_dir, test_params, ref_params)
-
     @pytest.mark.nightly
     @pytest.mark.precommit
     def test_mo_convert_clearing_transformation_registry(self, ie_device, precision, ir_version,
@@ -10,8 +10,8 @@ import openvino.runtime as ov
 import pytest
 import torch
 import unittest
-from openvino.runtime import PartialShape, Dimension, Model, Type, InputCutInfo
-
+from openvino.runtime import PartialShape, Dimension, Model, Type
+from openvino.tools.ovc import InputCutInfo
 from common.mo_convert_test_class import CommonMOConvertTest
@@ -159,7 +159,7 @@ def create_pytorch_nn_module_case2(tmp_dir):
     sample_input2 = torch.zeros(1, 3, 10, 10)
     sample_input = sample_input1, sample_input2
 
-    return pt_model, ref_model, {'input_shape': ["[?,3,?,?]", PartialShape([-1, 3, -1, -1])],
+    return pt_model, ref_model, {'input': [PartialShape("[?,3,?,?]"), PartialShape([-1, 3, -1, -1])],
                                  'example_input': sample_input}
@@ -171,7 +171,7 @@ def create_pytorch_nn_module_with_scalar_input(tmp_dir):
     sample_input2 = torch.zeros(1, 3, 10, 10)
     sample_input = sample_input1, sample_input2
 
-    return pt_model, ref_model, {'input_shape': ["[]", PartialShape([-1, 3, -1, -1])],
+    return pt_model, ref_model, {'input': ["[]", PartialShape([-1, 3, -1, -1])],
                                  'example_input': sample_input}
@@ -183,7 +183,7 @@ def create_pytorch_nn_module_case3(tmp_dir):
     sample_input2 = torch.zeros(1, 3, 10, 10)
     sample_input = tuple([sample_input1, sample_input2])
 
-    return pt_model, ref_model, {'input_shape': "[?,3,?,?],[?,3,?,?]",
+    return pt_model, ref_model, {'input': "[?,3,?,?],[?,3,?,?]",
                                  'example_input': sample_input}
@@ -194,7 +194,7 @@ def create_pytorch_nn_module_case4(tmp_dir):
 
     ref_model = make_ref_pt_model_one_input(PartialShape([1, 3, 20, 20]))
 
-    return pt_model, ref_model, {'example_input': sample_input, "input_shape": [1, 3, 20, 20]}
+    return pt_model, ref_model, {'example_input': sample_input, "input": [1, 3, 20, 20]}
 
 
 def create_pytorch_nn_module_case5(tmp_dir):
@@ -247,7 +247,7 @@ def create_pytorch_nn_module_sample_input_int32(tmp_dir):
 
 def create_pytorch_nn_module_sample_input_int32_two_inputs(tmp_dir):
     pt_model = make_pt_model_two_inputs()
-    inp_shapes = ["[?,3,?,?]", PartialShape([-1, 3, -1, -1])]
+    inp_shapes = [PartialShape("[?,3,?,?]"), PartialShape([-1, 3, -1, -1])]
 
     sample_input1 = torch.zeros(1, 3, 10, 10, dtype=torch.int32)
     sample_input2 = torch.zeros(1, 3, 10, 10, dtype=torch.int32)
@@ -255,8 +255,7 @@ def create_pytorch_nn_module_sample_input_int32_two_inputs(tmp_dir):
     ref_model = make_ref_pt_model_two_inputs(
         [PartialShape([-1, 3, -1, -1]), inp_shapes[1]], dtype=np.int32)
 
-    return pt_model, ref_model, {'input_shape': inp_shapes,
-                                 'input': [np.int32, np.int32],
+    return pt_model, ref_model, {'input': [(np.int32, inp_shapes[0]), (np.int32, inp_shapes[1])],
                                  'example_input': sample_input}
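The rewrite above folds the parallel input_shape/type lists into one (type, shape) tuple per input, which is the form the new input parameter accepts:

    # Old style: two parallel lists, matched by position
    #     {'input_shape': inp_shapes, 'input': [np.int32, np.int32]}
    # New style: one tuple per input
    #     {'input': [(np.int32, inp_shapes[0]), (np.int32, inp_shapes[1])]}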
@@ -293,7 +292,7 @@ def create_pytorch_nn_module_layout_list(tmp_dir):
     ref_model.inputs[1].node.layout = Layout('nhwc')
 
     return pt_model, ref_model, {
-        'input_shape': [shape, shape], 'layout': ['nchw', Layout('nhwc')],
+        'input_shape': [shape, shape], 'layout': ['nchw', Layout('nhwc')], 'use_convert_model_from_mo': True
     }
@@ -308,7 +307,7 @@ def create_pytorch_nn_module_layout_list_case2(tmp_dir):
     ref_model.inputs[1].node.layout = Layout('nhwc')
 
     return pt_model, ref_model, {
-        'input_shape': [shape, shape], 'layout': ('nchw', Layout('nhwc'))}
+        'input_shape': [shape, shape], 'layout': ('nchw', Layout('nhwc')), 'use_convert_model_from_mo': True}
 
 
 def create_pytorch_nn_module_mean_list(tmp_dir):
@@ -330,7 +329,8 @@ def create_pytorch_nn_module_mean_list(tmp_dir):
     ref_model = Model([sigm], parameter_list, "test")
 
     return pt_model, ref_model, {
-        'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], 'compress_to_fp16': False}
+        'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], 'compress_to_fp16': False,
+        'use_convert_model_from_mo': True}
 
 
 def create_pytorch_nn_module_mean_list_default_no_compression(tmp_dir):
|
||||
@ -352,7 +352,7 @@ def create_pytorch_nn_module_mean_list_default_no_compression(tmp_dir):
|
||||
parameter_list = [param1, param2]
|
||||
ref_model = Model([sigm], parameter_list, "test")
|
||||
|
||||
return pt_model, ref_model, {'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]]}
|
||||
return pt_model, ref_model, {'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], 'use_convert_model_from_mo': True}
|
||||
|
||||
|
||||
def create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir):
|
||||
@ -375,7 +375,7 @@ def create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir):
|
||||
|
||||
return pt_model, ref_model, {
|
||||
'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]],
|
||||
'compress_to_fp16': False}
|
||||
'compress_to_fp16': False, 'use_convert_model_from_mo': True}
|
||||
|
||||
|
||||
def create_pytorch_nn_module_scale_list(tmp_dir):
|
||||
@@ -396,7 +396,8 @@ def create_pytorch_nn_module_scale_list(tmp_dir):
     parameter_list = [param1, param2]
     ref_model = Model([sigm], parameter_list, "test")
 
-    return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], 'compress_to_fp16': False}
+    return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], 'compress_to_fp16': False,
+                                 'use_convert_model_from_mo': True}
 
 
 def create_pytorch_nn_module_scale_list_default_no_compression(tmp_dir):
|
||||
@ -418,7 +419,7 @@ def create_pytorch_nn_module_scale_list_default_no_compression(tmp_dir):
|
||||
parameter_list = [param1, param2]
|
||||
ref_model = Model([sigm], parameter_list, "test")
|
||||
|
||||
return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]]}
|
||||
return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], 'use_convert_model_from_mo': True}
|
||||
|
||||
|
||||
def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir):
|
||||
@ -444,14 +445,14 @@ def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir):
|
||||
ref_model = Model([sigm], parameter_list, "test")
|
||||
|
||||
return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]],
|
||||
'compress_to_fp16': True}
|
||||
'compress_to_fp16': True, 'use_convert_model_from_mo': True}
|
||||
|
||||
|
||||
def create_pytorch_nn_module_shapes_list_static(tmp_dir):
|
||||
pt_model = make_pt_model_two_inputs()
|
||||
ref_model = make_ref_pt_model_two_inputs([1, 3, 20, 20])
|
||||
|
||||
return pt_model, ref_model, {'input_shape': [[1, 3, 20, 20], [1, 3, 20, 20]]}
|
||||
return pt_model, ref_model, {'input': [[1, 3, 20, 20], [1, 3, 20, 20]]}
|
||||
|
||||
|
||||
def create_pytorch_nn_module_shapes_list_static_via_input(tmp_dir):
|
||||
@ -476,7 +477,7 @@ def create_pytorch_nn_module_shapes_list_dynamic(tmp_dir):
|
||||
|
||||
parameter_list = [param1, param2]
|
||||
ref_model = Model([sigm], parameter_list, "test")
|
||||
return pt_model, ref_model, {'input_shape': inp_shapes}
|
||||
return pt_model, ref_model, {'input': inp_shapes}
|
||||
|
||||
|
||||
def create_pytorch_nn_module_shapes_list_dynamic_via_input(tmp_dir):
|
||||
@@ -501,7 +502,7 @@ def create_pytorch_nn_module_shapes_list_dynamic_single_input(tmp_dir):
     pt_model = make_pt_model_one_input()
     inp_shapes = [[Dimension(-1), 3, 20, Dimension(20, -1)]]
     ref_model = make_ref_pt_model_one_input(inp_shapes[0])
-    return pt_model, ref_model, {'input_shape': inp_shapes}
+    return pt_model, ref_model, {'input': inp_shapes}
 
 
 def create_pytorch_nn_module_shapes_list_dynamic_single_input_via_input(tmp_dir):
@@ -515,7 +516,7 @@ def create_pytorch_nn_module_shapes_list_static_single_input(tmp_dir):
     pt_model = make_pt_model_one_input()
     inp_shapes = [[1, 3, 20, 20]]
     ref_model = make_ref_pt_model_one_input(inp_shapes[0])
-    return pt_model, ref_model, {'input_shape': inp_shapes}
+    return pt_model, ref_model, {'input': inp_shapes}
 
 
 def create_pytorch_nn_module_shapes_list_static_single_input_via_input(tmp_dir):
@@ -677,7 +678,7 @@ def create_pytorch_module_with_optional_inputs_case3(tmp_dir):
         (1, 3, 10, 10)), "z": torch.ones((1, 3, 10, 10))}
     ref_model = make_ref_pt_model_with_optional_inputs(
         [3, 3, 3, 3], z_exist=True)
-    return net, ref_model, {"example_input": example_input, "input_shape": [[3, 3, 3, 3], [3, 3, 3, 3]]}
+    return net, ref_model, {"example_input": example_input, "input": [[3, 3, 3, 3], [3, 3, 3, 3]]}
 
 
 def create_pytorch_module_with_optional_inputs_case4(tmp_dir):
@@ -691,7 +692,7 @@ def create_pytorch_module_with_optional_inputs_case5(tmp_dir):
     net = make_pt_model_with_optional_input()
     ref_model = make_ref_pt_model_with_optional_inputs(
         [1, 3, -1, -1], z_exist=True)
-    return net, ref_model, {"input": ["x", "z"], "input_shape": [[1, 3, -1, -1], [1, 3, -1, -1]]}
+    return net, ref_model, {"input": [("x",[1, 3, -1, -1]), ("z", [1, 3, -1, -1])]}
 
 
 def create_pytorch_module_with_compressed_int8_constant(tmp_dir):
@@ -956,11 +957,11 @@ def create_pt_model_with_custom_op():
 
 class ConvertRaises(unittest.TestCase):
     def test_example_inputs(self):
-        from openvino.runtime import convert_model
+        from openvino.tools.ovc import convert_model
         pytorch_model = create_pt_model_with_custom_op()
 
         # Check that mo raises error message of wrong argument.
-        with self.assertRaisesRegex(AssertionError, ".*argument is not recognized.*"):
+        with self.assertRaisesRegex(TypeError, ".*got an unexpected keyword argument 'example_inputs'.*"):
            convert_model(pytorch_model, example_inputs=(torch.tensor(1),))
 
     def test_failed_extension(self):
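The assertion change above reflects that ovc's convert_model now validates keyword arguments like an ordinary Python signature instead of raising its own AssertionError. A sketch:

    import torch
    from openvino.tools.ovc import convert_model

    try:
        # 'example_inputs' is a typo for 'example_input', so Python raises a TypeError
        convert_model(pytorch_model, example_inputs=(torch.tensor(1),))
    except TypeError as e:
        print(e)  # ... got an unexpected keyword argument 'example_inputs'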
@@ -139,7 +139,7 @@ def create_tf_module(tmp_dir):
     model_ref = Model([sigm], parameter_list, "test")
 
     net = Net()
-    return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]}
+    return net, model_ref, {'input': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]}
 
 
 def create_tf_module_layout_list(tmp_dir):
@@ -166,7 +166,8 @@ def create_tf_module_layout_list(tmp_dir):
     model_ref.inputs[1].node.layout = Layout('NHC')
 
     net = Net()
-    return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])], 'layout': ["NCH", "NHC"]}
+    return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])], 'layout': ["NCH", "NHC"],
+                            'use_convert_model_from_mo': True}
 
 
 def create_tf_module_dynamic(tmp_dir):
def create_tf_module_dynamic(tmp_dir):
|
||||
@ -192,7 +193,7 @@ def create_tf_module_dynamic(tmp_dir):
|
||||
model_ref = Model([sigm], parameter_list, "test")
|
||||
|
||||
net = Net()
|
||||
return net, model_ref, {'input_shape': input_shapes}
|
||||
return net, model_ref, {'input': input_shapes}
|
||||
|
||||
|
||||
def create_keras_layer(tmp_dir):
|
||||
@ -216,7 +217,7 @@ def create_keras_layer(tmp_dir):
|
||||
model_ref = Model([sigm], parameter_list, "test")
|
||||
|
||||
net = LayerModel()
|
||||
return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]}
|
||||
return net, model_ref, {'input': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]}
|
||||
|
||||
|
||||
def create_keras_layer_dynamic(tmp_dir):
|
||||
@@ -242,7 +243,7 @@ def create_keras_layer_dynamic(tmp_dir):
     model_ref = Model([sigm], parameter_list, "test")
 
     net = LayerModel()
-    return net, model_ref, {'input_shape': input_shapes}
+    return net, model_ref, {'input': input_shapes}
 
 
 def create_tf_checkpoint(tmp_dir):
@@ -518,7 +519,7 @@ def create_keras_layer_with_example_input_2(tmp_dir):
 
 def create_keras_layer_with_input_shapes_case1(tmp_dir):
     model, model_ref = create_keras_layer_input_list()
-    return model, model_ref, {'input_shape': [[1, 2, 3], [1, 2, 3]]}
+    return model, model_ref, {'input': [[1, 2, 3], [1, 2, 3]]}
 
 
 def create_keras_layer_with_input_shapes_case2(tmp_dir):
@@ -528,7 +529,7 @@ def create_keras_layer_with_input_shapes_case2(tmp_dir):
 
 def create_keras_layer_with_input_shapes_case3(tmp_dir):
     model, model_ref = create_keras_layer_input_dict_one_inp()
-    return model, model_ref, {'input': ['args'], 'input_shape': [1, 2, 3]}
+    return model, model_ref, {'input': [('args', [1, 2, 3])]}
 
 
 def create_keras_layer_with_input_shapes_case4(tmp_dir):
@@ -669,7 +670,7 @@ class TestMoConvertTF(CommonMOConvertTest):
                                temp_dir):
         fw_model, graph_ref, mo_params = create_model(temp_dir)
 
-        test_params = {'input_model': fw_model, 'use_new_frontend': True}
+        test_params = {'input_model': fw_model}
         if mo_params is not None:
             test_params.update(mo_params)
         self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False)
@@ -679,10 +680,10 @@ class TestMoConvertTF(CommonMOConvertTest):
     def test_unnamed_saved_model_dir(self, ie_device, precision, ir_version, temp_dir):
         saved_model_dir, graph_ref = create_tf_saved_model_dir(temp_dir)
 
-        test_params = {'input_model': saved_model_dir, 'use_new_frontend': True}
+        test_params = {'input_model': saved_model_dir}
         self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False)
 
-        test_params = {'input_model': saved_model_dir, 'use_new_frontend': False}
+        test_params = {'input_model': saved_model_dir}
         self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False)
 
     def test_zero_copy(self, ie_device, precision, ir_version, temp_dir):
@@ -741,7 +742,7 @@ class TestMoConvertTF(CommonMOConvertTest):
         import tensorflow as tf
         tf.compat.v1.reset_default_graph()
 
-        from openvino.tools.mo import convert_model
+        from openvino.tools.ovc import convert_model
         from openvino.runtime import compile_model
         import gc
@@ -795,7 +796,7 @@ class TFConvertTest(unittest.TestCase):
     @pytest.mark.precommit
     def test_tf_function_no_signature(self):
         import tensorflow as tf
-        from openvino.runtime import convert_model
+        from openvino.tools.ovc import convert_model
 
         @tf.function()
         def function(x1, x2):
@@ -5,8 +5,9 @@
 import os
 import sys
 import unittest
-from openvino.tools.mo import mo
+from openvino.tools.ovc import ovc
 from openvino.tools.ovc.cli_parser import get_mo_convert_params
+from openvino.tools.mo.utils.cli_parser import get_mo_convert_params as legacy_mo_params
 from pathlib import Path
 
 from common.utils.common_utils import shell
@@ -16,8 +17,8 @@ class TestSubprocessMoConvert(unittest.TestCase):
     def test_mo_convert(self):
         mo_convert_params = get_mo_convert_params()
 
-        # Test cli tool help
-        mo_path = Path(mo.__file__).parent
+        # Test ovc tool help
+        mo_path = Path(ovc.__file__).parent
         mo_runner = mo_path.joinpath('main.py').as_posix()
         params = [sys.executable, mo_runner, "--help"]
         _, mo_output, _ = shell(params)
@@ -29,11 +30,12 @@ class TestSubprocessMoConvert(unittest.TestCase):
             for param_name in group:
                 assert param_name in mo_output
 
-        # Test Python API help
+        # Test Python API help, applicable for convert_model from tools.mo only
         mo_help_file = os.path.join(os.path.dirname(__file__), "mo_convert_help.py")
         params = [sys.executable, mo_help_file]
         _, mo_output, _ = shell(params)
 
-        for group in mo_convert_params:
+        legacy_params = legacy_mo_params()
+        for group in legacy_params:
             for param_name in group:
                 assert param_name in mo_output
@@ -27,7 +27,9 @@ def generate_ir_ovc(coverage=False, **kwargs):
     else:
         params = [sys.executable, ovc_runner]
     for key, value in kwargs.items():
-        if key == "batch":
+        if key == "input_model":
+            params.append((str(value)))
+        elif key == "batch":
             params.extend(("-b", str(value)))
         elif key == "k":
             params.extend(("-k", str(value)))
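With input_model handled positionally, the assembled command line takes roughly this shape (paths hypothetical; flag spelling for the other keys is assumed to follow a "--<key> <value>" pattern elsewhere in the helper):

    # python <ovc_runner> model.pb --output_model /tmp/out/model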
@@ -81,7 +83,7 @@ class TestOVCTool(CommonMOConvertTest):
         core = Core()
 
         # tests for MO cli tool
-        exit_code, stderr = generate_ir_ovc(coverage=False, **{"input_model": model_path, "output_dir": temp_dir})
+        exit_code, stderr = generate_ir_ovc(coverage=False, **{"input_model": model_path, "output_model": temp_dir + os.sep + "model"})
         assert not exit_code
 
         ov_model = core.read_model(os.path.join(temp_dir, "model.xml"))
@@ -148,7 +148,7 @@ class PytorchLayerTest:
 
     def convert_via_mo(self, model, example_input, trace_model, dynamic_shapes, ov_inputs):
         import torch
-        from openvino.runtime import convert_model
+        from openvino.tools.ovc import convert_model
         kwargs = {"example_input": example_input if len(
             example_input) > 1 else example_input[0], "compress_to_fp16": False}
         with torch.no_grad():
@@ -1,5 +1,4 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from openvino.tools.mo.convert import convert_model
-from openvino.tools.ovc import InputCutInfo, LayoutMap  # pylint: disable=no-name-in-module,import-error
+from openvino.tools.mo.convert import convert_model, LayoutMap, InputCutInfo
@@ -2,14 +2,17 @@
 # SPDX-License-Identifier: Apache-2.0
 import os
 import pathlib
+from collections import namedtuple
 from typing import Any
 
 from openvino.runtime import PartialShape, Shape, Layout, Model
 from openvino.tools.mo.convert_impl import _convert
-from openvino.tools.ovc import InputCutInfo, LayoutMap  # pylint: disable=no-name-in-module,import-error
 from openvino.tools.mo.utils.cli_parser import get_all_cli_parser  # pylint: disable=no-name-in-module,import-error
 from openvino.tools.mo.utils.logger import get_logger_state, restore_logger_state  # pylint: disable=no-name-in-module,import-error
 
+LayoutMap = namedtuple("LayoutMap", ["source_layout", "target_layout"], defaults=[None, None])
+InputCutInfo = namedtuple("InputInfo", ["name", "shape", "type", "value"], defaults=[None, None, None, None])
+
 
 def convert_model(
     input_model: [str, pathlib.Path, Any] = None,
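With the namedtuples now defined in tools.mo itself, constructing them stays source-compatible for legacy users; a sketch:

    import numpy as np
    from openvino.runtime import PartialShape
    from openvino.tools.mo import InputCutInfo, LayoutMap

    cut = InputCutInfo("Relu", PartialShape([3, 2]), np.int32)   # value defaults to None
    lmap = LayoutMap(source_layout="nchw", target_layout="nhwc")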
@@ -147,13 +147,13 @@ def single_input_to_input_cut_info(input: [str, tuple, list, PartialShape, Type, type]):
     if isinstance(input, str):
         # Parse params from string
         node_name, shape, value, data_type = parse_input_value(input)
-        return openvino.runtime.InputCutInfo(node_name,
+        return openvino.tools.mo.InputCutInfo(node_name,
                                              PartialShape(shape) if shape is not None else None,
                                              data_type,
                                              value)
-    if isinstance(input, openvino.runtime.InputCutInfo):
+    if isinstance(input, openvino.tools.mo.InputCutInfo):
         # Wrap input.shape to PartialShape if possible and wrap to InputCutInfo
-        return openvino.runtime.InputCutInfo(input.name,
+        return openvino.tools.mo.InputCutInfo(input.name,
                                              PartialShape(input.shape) if input.shape is not None else None,
                                              input.type,
                                              input.value)
@@ -183,18 +183,18 @@ def single_input_to_input_cut_info(input: [str, tuple, list, PartialShape, Type, type]):
         else:
             raise Exception("Incorrect input parameters provided. Expected tuple with input name, "
                             "input type or input shape. Got unknown object: {}".format(val))
-        return openvino.runtime.InputCutInfo(name,
+        return openvino.tools.mo.InputCutInfo(name,
                                              PartialShape(shape) if shape is not None else None,
                                              inp_type,
                                              None)
     # Case when only type is set
     if isinstance(input, (type, Type)):
-        return openvino.runtime.InputCutInfo(None, None, input, None)
+        return openvino.tools.mo.InputCutInfo(None, None, input, None)
 
     # We don't expect here single unnamed value. If list of int is set it is considered as shape.
     # Setting of value is expected only using InputCutInfo or string analog.
 
-    raise Exception("Unexpected object provided for input. Expected openvino.runtime.InputCutInfo "
+    raise Exception("Unexpected object provided for input. Expected openvino.toos.mo.InputCutInfo "
                     "or tuple or str. Got {}".format(type(input)))
@@ -213,12 +213,12 @@ def input_to_input_cut_info(input: [str, tuple, list]):
 
             # Parse string with parameters for single input
             node_name, shape, value, data_type = parse_input_value(input_value)
-            inputs.append(openvino.runtime.InputCutInfo(node_name,
+            inputs.append(openvino.tools.mo.InputCutInfo(node_name,
                                                         PartialShape(shape) if shape is not None else None,
                                                         data_type,
                                                         value))
         return inputs
-    if isinstance(input, openvino.runtime.InputCutInfo):
+    if isinstance(input, openvino.tools.mo.InputCutInfo):
         # Wrap to list and return
         return [input]
     if isinstance(input, tuple):
@@ -269,11 +269,11 @@ def input_shape_to_input_cut_info(input_shape: [str, Shape, PartialShape, list, tuple]):
                 shape = PartialShape(shape)
                 assert inputs[idx].shape is None, "Shape was set in both \"input\" and in \"input_shape\" parameter." \
                                                   "Please use either \"input\" or \"input_shape\" for shape setting."
-                inputs[idx] = openvino.runtime.InputCutInfo(inputs[idx].name, shape, inputs[idx].type, inputs[idx].value)
+                inputs[idx] = openvino.tools.mo.InputCutInfo(inputs[idx].name, shape, inputs[idx].type, inputs[idx].value)
 
         else:
             for shape in input_shape:
-                inputs.append(openvino.runtime.InputCutInfo(None, PartialShape(shape), None, None))
+                inputs.append(openvino.tools.mo.InputCutInfo(None, PartialShape(shape), None, None))
             return
 
     raise Exception("Unexpected object provided for input_shape. Expected PartialShape, Shape, tuple, list or str. "
@@ -375,7 +375,7 @@ def source_target_layout_to_str(value):
 def layoutmap_to_str(value):
     if isinstance(value, str):
         return value
-    if isinstance(value, openvino.runtime.LayoutMap):
+    if isinstance(value, openvino.tools.mo.LayoutMap):
         assert value.source_layout is not None, "Incorrect layout map. 'source_layout' should be set."
         source_layout = layout_to_str(value.source_layout)
         if value.target_layout is not None:
@@ -400,7 +400,7 @@ def layout_param_to_str(value):
                 raise Exception("Incorrect operation name type. Expected string, got {}".format(type(op_name)))
             values_str.append(op_name + "(" + layoutmap_to_str(layout) + ")")
         return ",".join(values_str)
-    if isinstance(value, openvino.runtime.LayoutMap):
+    if isinstance(value, openvino.tools.mo.LayoutMap):
         return layoutmap_to_str(value)
     if isinstance(value, list) or isinstance(value, tuple):
         values_str = []
@@ -490,7 +490,7 @@ ParamDescription = namedtuple("ParamData",
 
 
 def get_mo_convert_params():
-    mo_convert_docs = openvino.runtime.convert_model.__doc__
+    mo_convert_docs = openvino.tools.mo.convert_model.__doc__
     mo_convert_params = {}
     group = "Optional parameters:"
     mo_convert_params[group] = {}
@@ -784,7 +784,7 @@ def writable_dir(path: str):
 
 
 def add_args_by_description(args_group, params_description):
-    signature = inspect.signature(openvino.runtime.convert_model)
+    signature = inspect.signature(openvino.tools.mo.convert_model)
     filepath_args = get_params_with_paths_list()
     cli_tool_specific_descriptions = get_convert_model_help_specifics()
     for param_name, param_description in params_description.items():
@@ -2,9 +2,9 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import numpy as np
-from openvino.runtime import Layout, PartialShape, Dimension, Shape, Type
+from openvino.runtime import Layout, Dimension
 
-from openvino.runtime import InputCutInfo, LayoutMap
+from openvino.tools.mo import LayoutMap
 from openvino.tools.mo.utils.cli_parser import mean_scale_value_to_str, \
     transform_param_to_str, str_list_to_str, source_target_layout_to_str, layout_param_to_str
 from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry
@@ -20,7 +20,8 @@ from openvino.tools.mo.utils.cli_parser import get_placeholder_shapes, get_tuple_values
 from openvino.tools.mo.convert_impl import pack_params_to_args_namespace
 from openvino.tools.mo.utils.error import Error
 from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry
-from openvino.runtime import PartialShape, Dimension, Layout, InputCutInfo, LayoutMap
+from openvino.runtime import PartialShape, Dimension, Layout
+from openvino.tools.mo import LayoutMap, InputCutInfo
 
 
 class TestingMeanScaleGetter(UnitTestWithMockedTelemetry):
@@ -1,13 +1,4 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from openvino.tools.ovc.convert import convert_model, InputCutInfo, LayoutMap
-
-# pylint: disable=no-name-in-module,import-error,no-member
-try:
-    import openvino.runtime
-    openvino.runtime.convert_model = convert_model
-    openvino.runtime.InputCutInfo = InputCutInfo
-    openvino.runtime.LayoutMap = LayoutMap
-except:
-    pass
+from openvino.tools.ovc.convert import convert_model, InputCutInfo
(File diff suppressed because it is too large)
@@ -6,105 +6,42 @@ import pathlib
 from collections import namedtuple
 from typing import Any
 
-from openvino.runtime import PartialShape, Shape, Layout, Model  # pylint: disable=no-name-in-module,import-error
+from openvino.runtime import PartialShape, Shape, Model  # pylint: disable=no-name-in-module,import-error
 
 from openvino.tools.ovc.convert_impl import _convert
 from openvino.tools.ovc.logger import get_logger_state, restore_logger_state
 from openvino.tools.ovc.cli_parser import get_all_cli_parser
 
+#TODO: Why names InputCutInfo and InputInfo are different
 InputCutInfo = namedtuple("InputInfo", ["name", "shape", "type", "value"], defaults=[None, None, None, None])
-LayoutMap = namedtuple("LayoutMap", ["source_layout", "target_layout"], defaults=[None, None])
 
 
 def convert_model(
-    input_model: [str, pathlib.Path, Any] = None,
-
-    # Optional parameters
-    help: bool = False,
-    framework: [str] = None,
+    input_model: [str, pathlib.Path, Any, list],  # TODO: Instead of list just accept arbitrary number of positional arguments
 
     # Framework-agnostic parameters
     input: [str, list, tuple, InputCutInfo] = None,
     output: [str, list] = None,
-    input_shape: [str, PartialShape, Shape, list] = None,
     example_input: Any = None,
-    batch: int = None,
-    mean_values: [str, dict, list] = (),
-    scale_values: [str, dict, list] = (),
-    scale: [str, float] = None,
-    reverse_input_channels: bool = False,
-    source_layout: [str, Layout, dict] = (),
-    target_layout: [str, Layout, dict] = (),
-    layout: [str, Layout, LayoutMap, list, dict] = (),
     compress_to_fp16: bool = False,
     extensions: [str, pathlib.Path, list, Any] = None,
-    transform: [str, list, tuple] = "",
-    transformations_config: [str, pathlib.Path] = None,
-    silent: bool = True,
-    log_level: str = 'ERROR',
-    version: bool = None,
-    progress: bool = False,
-    stream_output: bool = False,
+    verbose: bool = False,
+    share_weights: bool = True,
 
     # PaddlePaddle-specific parameters:
-    example_output: Any = None,
+    example_output: Any = None,  # TODO: Consider removing
 
     # TensorFlow*-specific parameters
-    input_model_is_text: bool = None,
-    input_checkpoint: [str, pathlib.Path] = None,
-    input_meta_graph: [str, pathlib.Path] = None,
-    saved_model_dir: [str, pathlib.Path] = None,
-    saved_model_tags: [str, list] = None,
-    tensorflow_custom_operations_config_update: [str, pathlib.Path] = None,
-    tensorflow_object_detection_api_pipeline_config: [str, pathlib.Path] = None,
-    tensorboard_logdir: [str, pathlib.Path] = None,
-    tensorflow_custom_layer_libraries: [str, pathlib.Path] = None,
-
-    # MXNet-specific parameters:
-    input_symbol: [str, pathlib.Path] = None,
-    nd_prefix_name: str = None,
-    pretrained_model_name: str = None,
-    save_params_from_nd: bool = None,
-    legacy_mxnet_model: bool = None,
-    enable_ssd_gluoncv: bool = False,
-
-    # Caffe*-specific parameters:
-    input_proto: [str, pathlib.Path] = None,
-    caffe_parser_path: [str, pathlib.Path] = None,
-    k: [str, pathlib.Path] = None,
-    disable_omitting_optional: bool = False,
-    enable_flattening_nested_params: bool = False,
-
-    # Kaldi-specific parameters:
-    counts: [str, pathlib.Path] = None,
-    remove_output_softmax: bool = False,
-    remove_memory: bool = False,
-
-    **args
+    saved_model_tags: [str, list] = None,  # TODO: Consider removing
 ) -> Model:
     """
     Converts the model from original framework to OpenVino Model.
 
     Args:
-        :param help:
-            Print available parameters.
-        :param framework:
-            Name of the framework used to train the input model.
 
     Framework-agnostic parameters:
         :param input_model:
             Model object in original framework (PyTorch, Tensorflow) or path to model file.
-            Tensorflow*: a file with a pre-trained model (binary or text .pb file after freezing).
-            Caffe*: a model proto file with model weights
 
             Supported formats of input model:
 
-            PaddlePaddle
-            paddle.hapi.model.Model
-            paddle.fluid.dygraph.layers.Layer
-            paddle.fluid.executor.Executor
-
             PyTorch
             torch.nn.Module
             torch.jit.ScriptModule
@@ -123,6 +60,11 @@ def convert_model(
             tf.Module
             tf.train.checkpoint
 
+            PaddlePaddle
+            paddle.hapi.model.Model
+            paddle.fluid.dygraph.layers.Layer
+            paddle.fluid.executor.Executor
+
         :param input:
             Input can be set by passing a list of InputCutInfo objects or by a list
             of tuples. Each tuple can contain optionally input name, input
@@ -149,94 +91,11 @@ def convert_model(
             The name of the output operation of the model or list of names. For TensorFlow*,
             do not add :0 to this name.The order of outputs in converted model is the
             same as order of specified operation names.
-        :param input_shape:
-            Input shape(s) that should be fed to an input node(s) of the model. Input
-            shapes can be defined by passing a list of objects of type PartialShape,
-            Shape, [Dimension, ...] or [int, ...] or by a string of the following
-            format. Shape is defined as a comma-separated list of integer numbers
-            enclosed in parentheses or square brackets, for example [1,3,227,227]
-            or (1,227,227,3), where the order of dimensions depends on the framework
-            input layout of the model. For example, [N,C,H,W] is used for ONNX* models
-            and [N,H,W,C] for TensorFlow* models. The shape can contain undefined
-            dimensions (? or -1) and should fit the dimensions defined in the input
-            operation of the graph. Boundaries of undefined dimension can be specified
-            with ellipsis, for example [1,1..10,128,128]. One boundary can be
-            undefined, for example [1,..100] or [1,3,1..,1..]. If there are multiple
-            inputs in the model, "input_shape" should contain definition of shape
-            for each input separated by a comma, for example: [1,3,227,227],[2,4]
-            for a model with two inputs with 4D and 2D shapes. Alternatively, specify
-            shapes with the "input" option.
         :param example_input:
             Sample of model input in original framework.
             For PyTorch it can be torch.Tensor.
             For Tensorflow it can be tf.Tensor or numpy.ndarray.
             For PaddlePaddle it can be Paddle Variable.
-        :param batch:
-            Set batch size. It applies to 1D or higher dimension inputs.
-            The default dimension index for the batch is zero.
-            Use a label 'n' in "layout" or "source_layout" option to set the batch dimension.
-            For example, "x(hwnc)" defines the third dimension to be the batch.
-        :param mean_values:
-            Mean values to be used for the input image per channel. Mean values can
-            be set by passing a dictionary, where key is input name and value is mean
-            value. For example mean_values={'data':[255,255,255],'info':[255,255,255]}.
-            Or mean values can be set by a string of the following format. Values to
-            be provided in the (R,G,B) or [R,G,B] format. Can be defined for desired
-            input of the model, for example: mean_values="data[255,255,255],info[255,255,255]".
-            The exact meaning and order of channels depend on how the original model
-            was trained.
-        :param scale_values:
-            Scale values to be used for the input image per channel. Scale values
-            can be set by passing a dictionary, where key is input name and value is
-            scale value. For example scale_values={'data':[255,255,255],'info':[255,255,255]}.
-            Or scale values can be set by a string of the following format. Values
-            are provided in the (R,G,B) or [R,G,B] format. Can be defined for desired
-            input of the model, for example: scale_values="data[255,255,255],info[255,255,255]".
-            The exact meaning and order of channels depend on how the original model
-            was trained. If both "mean_values" and "scale_values" are specified,
-            the mean is subtracted first and then scale is applied regardless of
-            the order of options in command line.
-        :param scale:
-            All input values coming from original network inputs will be divided
-            by this value. When a list of inputs is overridden by the "input" parameter,
-            this scale is not applied for any input that does not match with the original
-            input of the model. If both "mean_values" and "scale" are specified,
-            the mean is subtracted first and then scale is applied regardless of
-            the order of options in command line.
-        :param reverse_input_channels:
-            Switch the input channels order from RGB to BGR (or vice versa). Applied
-            to original inputs of the model if and only if a number of channels equals
-            3. When "mean_values"/"scale_values" are also specified, reversing
-            of channels will be applied to user's input data first, so that numbers
-            in "mean_values" and "scale_values" go in the order of channels used
-            in the original model. In other words, if both options are specified,
-            then the data flow in the model looks as following: Parameter -> ReverseInputChannels
-            -> Mean apply-> Scale apply -> the original body of the model.
-        :param source_layout:
-            Layout of the input or output of the model in the framework. Layout can
-            be set by passing a dictionary, where key is input name and value is LayoutMap
-            object. Or layout can be set by string of the following format. Layout
-            can be specified in the short form, e.g. nhwc, or in complex form, e.g.
-            "[n,h,w,c]". Example for many names: "in_name1([n,h,w,c]),in_name2(nc),out_name1(n),out_name2(nc)".
-            Layout can be partially defined, "?" can be used to specify undefined
-            layout for one dimension, "..." can be used to specify undefined layout
-            for multiple dimensions, for example "?c??", "nc...", "n...c", etc.
-        :param target_layout:
-            Same as "source_layout", but specifies target layout that will be in
-            the model after processing by ModelOptimizer.
-        :param layout:
-            Combination of "source_layout" and "target_layout". Can't be used
-            with either of them. If model has one input it is sufficient to specify
-            layout of this input, for example "layout" nhwc. To specify layouts
-            of many tensors, names must be provided, for example: layout="name1(nchw),name2(nc)".
-            It is possible to instruct ModelOptimizer to change layout, for example:
-            layout="name1(nhwc->nchw),name2(cn->nc)".
-            Also "*" in long layout form can be used to fuse dimensions, for example "[n,c,...]->[n*c,...]".
         :param compress_to_fp16:
             If the original model has FP32 weights or biases, they are compressed
             to FP16. All intermediate data is kept in original precision. Option
             can be specified alone as "compress_to_fp16", or explicit True/False
             values can be set, for example: "compress_to_fp16=False", or "compress_to_fp16=True"
         :param extensions:
             Paths to libraries (.so or .dll) with extensions, comma-separated
             list of paths, objects derived from BaseExtension class or lists of
@ -244,124 +103,28 @@ def convert_model(
|
||||
a directory or a comma-separated list of directories with extensions
|
||||
are supported. To disable all extensions including those that are placed
|
||||
at the default location, pass an empty string.
|
||||
:param transform:
|
||||
Apply additional transformations. 'transform' can be set by a list
|
||||
of tuples, where the first element is transform name and the second element
|
||||
is transform parameters. For example: [('LowLatency2', {{'use_const_initializer':
|
||||
False}}), ...] transform="transformation_name1[args],transformation_name2..."
|
||||
where [args] is key=value pairs separated by semicolon. Examples:
|
||||
transform="LowLatency2" or
|
||||
transform="Pruning" or
|
||||
transform="LowLatency2[use_const_initializer=False]" or
|
||||
transform="MakeStateful[param_res_names=
|
||||
{'input_name_1':'output_name_1','input_name_2':'output_name_2'}]"
|
||||
Available transformations: "LowLatency2", "MakeStateful", "Pruning"
|
||||
:param transformations_config:
|
||||
Use the configuration file with transformations description or pass
|
||||
object derived from BaseExtension class. Transformations file can
|
||||
be specified as relative path from the current directory, as absolute
|
||||
path or as relative path from the mo root directory.
|
||||
:param silent:
|
||||
Prevent any output messages except those that correspond to log level
|
||||
equals ERROR, that can be set with the following option: "log_level".
|
||||
By default, log level is already ERROR.
|
||||
:param log_level:
|
||||
Logger level of logging massages from MO.
|
||||
        Expected one of ['CRITICAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'].
    :param version:
        Version of Model Conversion API
    :param progress:
        Enable model conversion progress display.
    :param stream_output:
        Switch model conversion progress display to a multiline mode.
    :param verbose:
        Print detailed information about conversion.
    :param share_weights:
        Map memory of weights instead of reading files or share memory from input model.
        Currently, mapping feature is provided only for ONNX models
        that do not require fallback to the legacy ONNX frontend for the conversion.
        Reuse weights allocated in the original model. If input model is in file,
        then mmap is used to allocate weights directly from file. If input model is
        runtime object, then original memory regions allocated in the original model
        are reused for weights in the converted model.

    PaddlePaddle-specific parameters:
    :param example_output:
        Sample of model output in original framework. For PaddlePaddle it can be Paddle Variable.

    TensorFlow*-specific parameters:
    :param input_model_is_text:
        TensorFlow*: treat the input model file as a text protobuf format. If
        not specified, the convert_model() treats it as a binary file by default.
    :param input_checkpoint:
        TensorFlow*: variables file to load.
    :param input_meta_graph:
        TensorFlow*: a file with a meta-graph of the model before freezing
    :param saved_model_dir:
        TensorFlow*: directory with a model in SavedModel format of TensorFlow
        1.x or 2.x version.
    :param saved_model_tags:
        Group of tag(s) of the MetaGraphDef to load, in string format, separated
        by ','. If a tag-set contains multiple tags, all tags must be passed in.
    :param tensorflow_custom_operations_config_update:
        TensorFlow*: update the configuration file with node name patterns
        with input/output nodes information.
    :param tensorflow_object_detection_api_pipeline_config:
        TensorFlow*: path to the pipeline configuration file used to generate
        model created with help of Object Detection API.
    :param tensorboard_logdir:
        TensorFlow*: dump the input graph to a given directory that should be
        used with TensorBoard.
    :param tensorflow_custom_layer_libraries:
        TensorFlow*: comma separated list of shared libraries with TensorFlow*
        custom operations implementation.

    MXNet-specific parameters:
    :param input_symbol:
        Symbol file (for example, model-symbol.json) that contains a topology
        structure and layer attributes
    :param nd_prefix_name:
        Prefix name for args.nd and argx.nd files.
    :param pretrained_model_name:
        Name of a pretrained MXNet model without extension and epoch number.
        This model will be merged with args.nd and argx.nd files
    :param save_params_from_nd:
        Enable saving built parameters file from .nd files
    :param legacy_mxnet_model:
        Enable MXNet loader to make a model compatible with the latest MXNet
        version. Use only if your model was trained with MXNet version lower
        than 1.0.0
    :param enable_ssd_gluoncv:
        Enable pattern matchers replacers for converting gluoncv ssd topologies.

    Caffe*-specific parameters:
    :param input_proto:
        Deploy-ready prototxt file that contains a topology structure and
        layer attributes
    :param caffe_parser_path:
        Path to Python Caffe* parser generated from caffe.proto
    :param k:
        Path to CustomLayersMapping.xml to register custom layers
    :param disable_omitting_optional:
        Disable omitting optional attributes to be used for custom layers.
        Use this option if you want to transfer all attributes of a custom layer
        to IR. Default behavior is to transfer the attributes with default values
        and the attributes defined by the user to IR.
    :param enable_flattening_nested_params:
        Enable flattening optional params to be used for custom layers. Use
        this option if you want to transfer attributes of a custom layer to IR
        with flattened nested parameters. Default behavior is to transfer
        the attributes without flattening nested parameters.

    Kaldi-specific parameters:
    :param counts:
        Path to the counts file
    :param remove_output_softmax:
        Removes the SoftMax layer that is the output layer
    :param remove_memory:
        Removes the Memory layer and uses additional inputs and outputs instead

    Returns:
        openvino.runtime.Model
    """
    params = locals()
    logger_state = get_logger_state()
    del params['args']
    params.update(args)
    cli_parser = get_all_cli_parser()
    ov_model, _ = _convert(cli_parser, params, True)
    restore_logger_state(logger_state)
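# A minimal usage sketch of the convert_model() API documented above.
# Not part of this diff; "model.onnx" is a hypothetical input path, and the
# parameters shown are the ones described in the docstring.
from openvino.runtime import serialize
from openvino.tools.ovc import convert_model

ov_model = convert_model("model.onnx", verbose=True, share_weights=True)
serialize(ov_model, "model.xml", "model.bin")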
@ -15,40 +15,31 @@ try:
except ImportError:
    import openvino.tools.ovc.telemetry_stub as tm

from openvino.tools.ovc.moc_frontend.check_config import legacy_transformations_config_used, \
    tensorflow_custom_operations_config_update_used, new_extensions_used
from openvino.tools.ovc.moc_frontend.check_config import new_extensions_used
from openvino.tools.ovc.moc_frontend.pipeline import moc_pipeline
from openvino.tools.ovc.moc_frontend.moc_emit_ir import moc_emit_ir
from openvino.tools.ovc.convert_data_type import destination_type_to_np_data_type
from openvino.tools.ovc.cli_parser import check_available_transforms, \
    get_advanced_cli_options, get_available_front_ends, get_caffe_cli_options, \
    get_common_cli_options, get_kaldi_cli_options, get_layout_values, get_freeze_placeholder_values, \
    get_mean_scale_dictionary, get_mxnet_cli_options, get_onnx_cli_options, \
    get_placeholder_shapes, get_tf_cli_options, parse_transform, parse_tuple_pairs, \
    get_model_name_from_args, depersonalize, get_mo_convert_params, input_to_input_cut_info, \
    input_shape_to_input_cut_info, freeze_placeholder_to_input_cut_info
from openvino.tools.ovc.cli_parser import get_available_front_ends, \
    get_common_cli_options, get_model_name_from_args, depersonalize, get_mo_convert_params, \
    input_to_input_cut_info, freeze_placeholder_to_input_cut_info

from openvino.tools.ovc.error import Error, FrameworkError, legacy_path_error
from openvino.tools.ovc.get_ov_update_message import get_ov_update_message, get_ov_api20_message, \
    get_tf_fe_message, get_try_legacy_fe_message, get_compression_message
from openvino.tools.ovc.error import Error, FrameworkError
from openvino.tools.ovc.get_ov_update_message import get_ov_update_message, get_compression_message
from openvino.tools.ovc.version import VersionChecker
from openvino.tools.ovc.utils import deduce_legacy_frontend_by_namespace, refer_to_faq_msg, check_values_equal
from openvino.tools.ovc.logger import init_logger, progress_printer
from openvino.tools.ovc.utils import check_values_equal
from openvino.tools.ovc.logger import init_logger
from openvino.tools.ovc.telemetry_utils import send_params_info, send_conversion_result, \
    get_tid
from openvino.tools.ovc.moc_frontend.check_config import legacy_extensions_used
from openvino.tools.ovc.moc_frontend.check_config import default_path as extensions_default_path
from openvino.tools.ovc.moc_frontend.pytorch_frontend_utils import get_pytorch_decoder, extract_input_info_from_example
from openvino.tools.ovc.moc_frontend.paddle_frontend_utils import paddle_frontend_converter
from openvino.tools.ovc.moc_frontend.shape_utils import parse_input_shapes

# pylint: disable=no-name-in-module,import-error
from openvino.frontend import FrontEndManager, OpConversionFailure, ProgressReporterExtension, TelemetryExtension
from openvino.frontend import FrontEndManager, OpConversionFailure, TelemetryExtension
from openvino.runtime import get_version as get_rt_version
from openvino.runtime import Type, PartialShape

try:
    from openvino.frontend.tensorflow.utils import type_supported_by_tf_fe, create_tf_graph_iterator, \
    from openvino.frontend.tensorflow.utils import create_tf_graph_iterator, type_supported_by_tf_fe, \
        extract_model_graph  # pylint: disable=no-name-in-module,import-error

    tf_frontend_with_python_bindings_installed = True
@ -63,31 +54,13 @@ def replace_ext(name: str, old: str, new: str):
    return base + new


def print_argv(argv: argparse.Namespace, is_caffe: bool, is_tf: bool, is_mxnet: bool, is_kaldi: bool, is_onnx: bool,
               model_name: str):
def print_argv(argv: argparse.Namespace, model_name: str):
    print('Model Conversion arguments:')
    props = OrderedDict()
    props['common_args'] = get_common_cli_options(model_name)
    props['advanced_args'] = get_advanced_cli_options()
    if is_caffe:
        props['caffe_args'] = get_caffe_cli_options()
    if is_tf:
        props['tf_args'] = get_tf_cli_options()
    if is_mxnet:
        props['mxnet_args'] = get_mxnet_cli_options()
    if is_kaldi:
        props['kaldi_args'] = get_kaldi_cli_options()
    if is_onnx:
        props['onnx_args'] = get_onnx_cli_options()

    framework_specifics_map = {
        'common_args': 'Common parameters:',
        'advanced_args': 'Advanced parameters:',
        'caffe_args': 'Caffe specific parameters:',
        'tf_args': 'TensorFlow specific parameters:',
        'mxnet_args': 'MXNet specific parameters:',
        'kaldi_args': 'Kaldi specific parameters:',
        'onnx_args': 'ONNX specific parameters:',
        'common_args': 'Common parameters:'
    }

    lines = []
@ -97,233 +70,50 @@ def print_argv(argv: argparse.Namespace, is_caffe: bool, is_tf: bool, is_mxnet:
            if isinstance(desc, list):
                lines.append('\t{}: \t{}'.format(desc[0], desc[1](getattr(argv, op, 'NONE'))))
            else:
                if op == 'k':
                    default_path = os.path.join(os.path.dirname(sys.argv[0]),
                                                'openvino/tools/mo/front/caffe/CustomLayersMapping.xml')
                    if getattr(argv, op, 'NONE') == default_path:
                        lines.append('\t{}: \t{}'.format(desc, 'Default'))
                        continue
                lines.append('\t{}: \t{}'.format(desc, getattr(argv, op, 'NONE')))
    print('\n'.join(lines), flush=True)


def legacy_framework_check(is_caffe, is_mxnet, is_kaldi):
    if is_caffe:
        legacy_path_error("The provided model is from Caffe framework. This is legacy functionality. ")
    if is_mxnet:
        legacy_path_error("The provided model is from MxNet framework. This is legacy functionality. ")
    if is_kaldi:
        legacy_path_error("The provided model is from Kaldi framework. This is legacy functionality. ")


def check_legacy_args(non_default_params, python_api_used):
    ignored_cli_options = ["output_dir", "model_name"]
    legacy_groups = ['Kaldi-specific parameters:', 'Caffe*-specific parameters:', 'MXNet-specific parameters:']
    tf_legacy_args = ['tensorflow_custom_operations_config_update', 'tensorflow_object_detection_api_pipeline_config',
                      'tensorboard_logdir', 'tensorflow_custom_layer_libraries', 'saved_model_tags']
    mo_convert_params = get_mo_convert_params()

    for key, value in non_default_params.items():
        if key in ignored_cli_options:
            if python_api_used:
                print("The provided option \"{}\" is applicable in command line tool only. The option will be ignored.".format(key))
        for group in legacy_groups:
            if key in mo_convert_params[group]:
                legacy_path_error("The provided option \"{}\" refers to legacy functionality. ".format(key))
        if key in tf_legacy_args:
            legacy_path_error("The provided option \"{}\" refers to legacy functionality. ".format(key))


def arguments_post_parsing(argv: argparse.Namespace):
    use_legacy_frontend = argv.use_legacy_frontend
    use_new_frontend = argv.use_new_frontend
    if argv.extensions is None:
        argv.extensions = [extensions_default_path()]

    if use_new_frontend and use_legacy_frontend:
        raise Error('Options "use_new_frontend" and "use_legacy_frontend" must not be used simultaneously.')

    if use_legacy_frontend:
        legacy_path_error('Option "use_legacy_frontend" was used, but legacy frontends are not available. ')

    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)

    if not moc_front_end and use_new_frontend:
        raise Error('Option "use_new_frontend" is specified but the Model Conversion API is unable to find new frontend. '
                    'Please ensure that your environment contains new frontend for the input model format or '
                    'try to install openvino-dev and convert the model using convert_model() from openvino.tools.mo.')

    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = \
        deduce_legacy_frontend_by_namespace(argv) if not moc_front_end else [False, False, False, False, False]

    legacy_framework_check(is_caffe, is_mxnet, is_kaldi)

    is_legacy_frontend = any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx])

    # handle a default case, i.e. use_new_frontend and use_legacy_frontend are not specified, when no frontend is found
    if not is_legacy_frontend and not moc_front_end:
        legacy_frameworks = ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']
        frameworks = list(set(legacy_frameworks + available_moc_front_ends))
        if not argv.framework:
            raise Error('Framework name can not be deduced from the given options: {}={}. '
                        'Please use "framework" with one from the list: {}.',
                        '"input_model="', argv.input_model, frameworks)
        elif argv.framework not in frameworks:
            if argv.framework == 'ir':
                raise Error('OpenVINO IR is passed as input_model in convert_model/mo, the IR doesn\'t need '
                            'conversion, please use it in runtime for inference with read_model/compile_model.')
            raise Error('Framework {} is not a valid target. Please use "framework" with one from the list: {}. ' +
                        refer_to_faq_msg(15), argv.framework, frameworks)

    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error('Path to input model or saved model dir is required: use "input_model", "saved_model_dir" or '
                    '"input_meta_graph"')
    elif is_onnx and not argv.input_model:
        raise Error('Path to input model is required: use "input_model".')

    # TODO: This function looks similar to another one. Check for code duplicates.
    log.debug("Model Conversion API started")
    log.debug('Output model name would be {}{{.xml, .bin}}'.format(argv.output_model))

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(argv.model_name))
    if argv.verbose:
        print_argv(argv, argv.output_model)

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx, argv.model_name)

    argv.data_type = 'FP32'  # if compression was enabled will be restored back to 'FP16' after apply_offline_transformations

    # This is just to check that transform key is valid and transformations are available
    check_available_transforms(parse_transform(argv.transform))

    if argv.scale and argv.scale_values:
        raise Error(
            'Both "scale" and "scale_values" are defined. Specify either scale factor or scale values per input ' +
            'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error("The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
                  "floating point value which all input values will be *divided*.", extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both "input_model" and "saved_model_dir" are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error('Incorrect saved model tag was provided. Specify "saved_model_tags" with no spaces in it')
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    if hasattr(argv, 'is_python_api_used') and argv.is_python_api_used:
        python_api_params_parsing(argv)
    else:
        argv.inputs_list, argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
            argv.input, argv.input_shape, argv.batch)
        argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
            argv.input,
            argv.freeze_placeholder_with_value)
        argv.unnamed_freeze_placeholder_with_value = {}
        params_parsing(argv)
        argv.output = argv.output.split(',') if argv.output else None
        argv.layout_values = get_layout_values(argv.layout, argv.source_layout, argv.target_layout)
        mean_values = parse_tuple_pairs(argv.mean_values)
        scale_values = parse_tuple_pairs(argv.scale_values)
        mean_scale = get_mean_scale_dictionary(mean_values, scale_values, argv.input)
        argv.mean_scale_values = mean_scale

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    return argv


def check_fallback(argv: argparse.Namespace):
    fallback_reasons = {}

    # Some frontend such as PDPD does not have legacy path so it has no reasons to fallback
    if not any(deduce_legacy_frontend_by_namespace(argv)):
        return fallback_reasons

    if argv.use_new_frontend:
        return fallback_reasons

    fallback_reasons['extensions'] = legacy_extensions_used
    fallback_reasons['transformations_config'] = legacy_transformations_config_used
    fallback_reasons['tensorflow_custom_operations_config_update'] = tensorflow_custom_operations_config_update_used

    reasons = [reason for reason, is_applicable in fallback_reasons.items() if is_applicable(argv)]
    return reasons


def update_fallback_with_conversion_error(use_new_frontend: bool, is_tf: bool, ex_msg: str, fallback_reasons: list):
    import re
    if not is_tf:
        # this sort of fallback is only used by TensorFlow Frontend
        return False

    if use_new_frontend:
        # this option forces to use new TensorFlow Frontend
        # so it is not possible for the fallback
        return False

    # for TensorFlow FE we have a set of operations that should lead to the fallback to the legacy
    conversion_error_re = r"^(\[TensorFlow\ Frontend\]\ Internal\ error\,\ no\ translator\ found\ for\ operation\(s\)\:\ )((\w+)(\,\ \w+)*)$"
    conversion_error_match = re.findall(conversion_error_re, ex_msg, re.MULTILINE)
    all_fallback_operations = [
        # corresponds to TF1 While operation
        "TensorArrayScatterV3", "TensorArrayV3", "TensorArraySizeV3", "TensorArrayGatherV3",
        "LoopCond", "Enter", "NextIteration", "Exit",
        # corresponds to operations with complex tensors
        "FFT", "FFT2D", "FFT3D", "IFFT", "IFFT2D", "IFFT3D",
        "RFFT", "RFFT2D", "RFFT3D", "IRFFT", "IRFFT2D", "IRFFT3D",
        "Complex", "ComplexAbs", "Real", "Imag",
    ]
    if len(conversion_error_match) < 1 or len(conversion_error_match[0]) != 4:
        # no match for the fallback by unsupported operation
        return False

    unsupported_operations = conversion_error_match[0][1].replace(" ", "").split(",")
    fallback_operations = [operation for operation in unsupported_operations if operation in all_fallback_operations]

    if len(fallback_operations) == 0:
        return False

    fallback_reasons.append("Fallback to the legacy TF FE due to operation(s): " + ', '.join(fallback_operations))
    return True
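# A hedged illustration (not part of the diff) of how conversion_error_re above
# classifies a failure; the message string is constructed to match the regex.
import re
msg = "[TensorFlow Frontend] Internal error, no translator found for operation(s): Complex, FFT"
pattern = r"^(\[TensorFlow\ Frontend\]\ Internal\ error\,\ no\ translator\ found\ for\ operation\(s\)\:\ )((\w+)(\,\ \w+)*)$"
match = re.findall(pattern, msg, re.MULTILINE)
ops = match[0][1].replace(" ", "").split(",")  # -> ['Complex', 'FFT'], both fallback operations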
def get_default_frontends():
    # Set which frontend to use by default, values should be 'new' or 'legacy'
    default_frontends = {
        'onnx': 'new',
        'tf': 'new'
    }
    return default_frontends


def get_moc_frontends(argv: argparse.Namespace):
    fem = argv.feManager

    # Read user flags:
    use_legacy_frontend = argv.use_legacy_frontend
    use_new_frontend = argv.use_new_frontend

    if not fem or use_legacy_frontend:
    if not fem:
        return None, []

    available_moc_front_ends = get_available_front_ends(fem)

    if not argv.framework and argv.input_model:
    if argv.framework:
        moc_front_end = fem.load_by_framework(argv.framework)  # WA to prevent process hanging. Need to remove when 115994 fixed.
        moc_front_end = fem.load_by_framework(argv.framework)
        return moc_front_end, available_moc_front_ends
    if argv.input_model:
        if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 2:
            moc_front_end = fem.load_by_model([argv.input_model[0], argv.input_model[1]])  # WA to prevent process hanging. Need to remove when 115994 fixed.
            moc_front_end = fem.load_by_model([argv.input_model[0], argv.input_model[1]])  # TODO: Pass all input model parts
        else:
            moc_front_end = fem.load_by_model(argv.input_model)  # WA to prevent process hanging. Need to remove when 115994 fixed.
            moc_front_end = fem.load_by_model(argv.input_model)
        if not moc_front_end:
            return None, available_moc_front_ends
        argv.framework = moc_front_end.get_name()
    elif argv.framework in available_moc_front_ends:
        moc_front_end = fem.load_by_framework(argv.framework)
    else:
        return None, []

    default_frontends = get_default_frontends()
    # Disable MOC frontend if default is set to legacy and no user override
    if default_frontends.get(moc_front_end.get_name()) == 'legacy' and not use_new_frontend:
        return None, available_moc_front_ends

    # This check as a workaround to skip IR frontend
    if not moc_front_end.get_name() in available_moc_front_ends:
        return None, available_moc_front_ends
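# A hedged sketch (not part of the diff) of the frontend lookup used above;
# "model.onnx" is a hypothetical path and the listed frontends depend on the install.
from openvino.frontend import FrontEndManager

fem = FrontEndManager()
print(fem.get_available_front_ends())            # e.g. ['ir', 'onnx', 'paddle', 'tf', ...]
moc_front_end = fem.load_by_model("model.onnx")  # a FrontEnd, or None if not recognized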
@ -332,59 +122,32 @@ def get_moc_frontends(argv: argparse.Namespace):
def prepare_ir(argv: argparse.Namespace):
    # TODO: remove this workaround once new TensorFlow frontend supports non-frozen formats: checkpoint, MetaGraph, and SavedModel
    # Now it converts all TensorFlow formats to the frozen .pb format in case new TensorFlow frontend
    is_tf, _, _, _, _ = deduce_legacy_frontend_by_namespace(argv)
    argv = arguments_post_parsing(argv)
    t = tm.Telemetry()

    graph = None
    fallback_reasons = []
    if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 1:
        argv.input_model = argv.input_model[0]

    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)
    if moc_front_end:
        fallback_reasons = check_fallback(argv)
        if len(fallback_reasons) == 0:
            if is_tf and tf_frontend_with_python_bindings_installed and \
                    type_supported_by_tf_fe(argv.input_model):
        # TODO: Should be moved to the same place where paddle and pytorch handle their objects
        if argv.framework == 'tf' and argv.is_python_object and type_supported_by_tf_fe(argv.input_model):
            argv.input_model = create_tf_graph_iterator(argv.input_model,
                                                        argv.placeholder_shapes,
                                                        argv.placeholder_data_types,
                                                        getattr(argv, "example_input", None))
        try:
            t.send_event("mo", "conversion_method", moc_front_end.get_name() + "_frontend")
            moc_front_end.add_extension(TelemetryExtension("mo", t.send_event, t.send_error, t.send_stack_trace))
            moc_front_end.add_extension(ProgressReporterExtension(progress_printer(argv)))
            if legacy_transformations_config_used(argv):
                raise Error('Legacy extensions are not supported for the new frontend')
            if legacy_extensions_used(argv):
                raise Error('Legacy transformations configuration is not supported for the new frontend')
            if tensorflow_custom_operations_config_update_used(argv) and is_tf:
                raise Error('TensorFlow custom operation config is not supported for the new frontend')
            if new_extensions_used(argv):
                for extension in argv.extensions:
                    moc_front_end.add_extension(extension)
            ngraph_function = moc_pipeline(argv, moc_front_end)
            return graph, ngraph_function
        except OpConversionFailure as ex:
            # in some set of operations (TF1 While), we have to fallback to the Legacy TensorFlow Frontend
            # this is the second attempt for the fallback
            if not update_fallback_with_conversion_error(argv.use_new_frontend, is_tf, str(ex), fallback_reasons):
                # re-throw exception for all frontends except TensorFlow FE
                # and in case unexpected conversion failures
                raise
        ov_model = moc_pipeline(argv, moc_front_end)
        return ov_model

    if len(fallback_reasons) > 0:
        reasons_message = ", ".join(fallback_reasons)
        t.send_event("mo", "fallback_reason", reasons_message)
        log.warning("The IR preparation cannot be executed with new frontend. "
                    f"The detailed reason why fallback to legacy is needed: not supported {reasons_message} were used. " +
                    refer_to_faq_msg(105))
        assert not hasattr(argv, 'is_fallback'), '`is_fallback` argument must not exist.'
        argv.is_fallback = True
    if not argv.input_model:
        raise Error('No input model is provided')

    t.send_event("mo", "conversion_method", "mo_legacy")
    legacy_path_error("The provided model cannot be converted with new frontend, as fallback to legacy is needed. ")
    return None, None
    raise Error('Cannot recognize input model.')


def check_model_object(argv):
@ -405,6 +168,10 @@ def check_model_object(argv):
        pass

    import io
    # FIXME: Consuming any io.BytesIO object as an ONNX model is too dangerous and
    # can conflict with others in the future (not future proof).
    # TODO: Refer to https://onnx.ai/onnx/intro/python.html to find examples with
    # real ONNX python objects. ONNX model has onnx.onnx_ml_pb2.ModelProto type.
    if isinstance(model, io.BytesIO):
        return 'onnx'

@ -419,24 +186,18 @@ def check_model_object(argv):
def driver(argv: argparse.Namespace, non_default_params: dict):
    init_logger(argv.log_level.upper(), argv.silent)
    if not hasattr(argv, 'log_level'):
        argv.log_level = 'ERROR'
    init_logger(argv.log_level.upper(), argv.verbose)

    # Log dictionary with non-default cli parameters where complex classes are excluded.
    log.debug(str(non_default_params))

    start_time = datetime.datetime.now()

    graph, ngraph_function = prepare_ir(argv)
    legacy_path = False
    if graph is not None:
        legacy_path_error("")
    else:
        res_ngraph_function = moc_emit_ir(ngraph_function, argv)
    ov_model = moc_emit_ir(prepare_ir(argv), argv)

    if res_ngraph_function is None:
        return res_ngraph_function

    if not argv.silent:
    if argv.verbose:
        elapsed_time = datetime.datetime.now() - start_time
        print('[ SUCCESS ] Total execution time: {:.2f} seconds. '.format(elapsed_time.total_seconds()))
    try:
@ -448,7 +209,7 @@ def driver(argv: argparse.Namespace, non_default_params: dict):
    except ImportError:
        pass

    return res_ngraph_function, legacy_path
    return ov_model


def args_dict_to_list(cli_parser, **kwargs):
@ -536,19 +297,17 @@ def show_mo_convert_help():
    print()


def input_model_is_object(argv):
    # Input model can be set as object only for "input_model" parameter.
    # "saved_model_dir" or meta specific options are only used to store paths to the input model.
    if 'input_model' not in argv:
def input_model_is_object(input_model):
    if input_model == ():
        return False
    if isinstance(argv['input_model'], (str, Path)):
        return False
    if argv['input_model'] is None:
    if isinstance(input_model, (str, Path)):
        return False
    if isinstance(input_model, (tuple, list)):
        return all(input_model_is_object(part) for part in input_model)
    return True
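# Hedged examples (not part of the diff) of what the new input_model_is_object() returns:
# path strings and tuples of path strings are not objects; an empty tuple means "not set".
input_model_is_object("model.onnx")                # False: a str is a path
input_model_is_object(("model.xml", "model.bin"))  # False: every part is a path
input_model_is_object(())                          # False: nothing was passed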
def python_api_params_parsing(argv: argparse.Namespace):
def params_parsing(argv: argparse.Namespace):
    """
    Parses params passed to convert_model and wraps resulting values into dictionaries or lists.
    After this method is applied, the following values are set in argv:
@ -583,15 +342,12 @@ def python_api_params_parsing(argv: argparse.Namespace):
    argv.inputs_list = input_names_list
    argv.input = ','.join(input_names_list)

    # Parse input_shape param and update InputCutInfo list
    input_shape_to_input_cut_info(argv.input_shape, inputs)

    # Parse freeze_placeholder_with_value.
    # values for freezing can be set both by named and unnamed approach if
    # 'input' was used without names and 'freeze_placeholder_with_value' was used with names.
    # So named and unnamed values are stored separately.
    argv.freeze_placeholder_with_value, argv.unnamed_freeze_placeholder_with_value = \
        freeze_placeholder_to_input_cut_info(argv.freeze_placeholder_with_value, inputs)
        freeze_placeholder_to_input_cut_info(inputs)

    if len(input_names_list) > 0:
        # Named inputs case
@ -638,7 +394,7 @@ def python_api_params_parsing(argv: argparse.Namespace):
def pack_params_to_args_namespace(args: dict, cli_parser: argparse.ArgumentParser):
    if len(args) > 0:
        args_string = params_to_string(**args)
        argv, _ = cli_parser.parse_known_args(args_dict_to_list(cli_parser, **args_string))
        argv, _ = cli_parser.parse_known_args(args_dict_to_list(cli_parser, **args_string))  # FIXME: input_model_args can be a list

        # get list of all available params for convert_model()
        all_params = {}
@ -659,36 +415,12 @@ def pack_params_to_args_namespace(args: dict, cli_parser: argparse.ArgumentParse
    return argv


def update_args_for_saved_model_dir(args: dict):
    """
    If a directory is set in the 'input_model' argument, the directory is considered as a TF saved model.
    In this case this method updates args and moves the saved model directory to the 'saved_model_dir' param.
    :param args: dictionary with arguments from user
    """
    if 'saved_model_dir' in args and args['saved_model_dir'] is not None and \
            'input_model' in args and args['input_model'] is not None:
        raise Error("Both \"input_model\" and \"saved_model_dir\" are defined. "
                    "Please specify either \"input_model\" or \"saved_model_dir\" directory.")

    if 'input_model' in args and isinstance(args['input_model'], (str, Path)) and os.path.isdir(args['input_model']):
        args['saved_model_dir'] = args['input_model']
        args['input_model'] = None
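# Hedged illustration (not part of the diff) of the argument rewrite above;
# "my_saved_model/" is a hypothetical directory in TensorFlow SavedModel format.
args = {'input_model': 'my_saved_model/'}
update_args_for_saved_model_dir(args)
# afterwards: args == {'input_model': None, 'saved_model_dir': 'my_saved_model/'}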
def silent_is_false(argv: argparse.Namespace):
    return argv is not None and hasattr(argv, 'silent') and argv.silent is False


def framework_is_tf(args, argv):
    if input_model_is_object(args) and check_model_object(args) == "tf":
        return True
    if argv is not None:
        is_tf, _, _, _, _ = deduce_legacy_frontend_by_namespace(argv)
        return is_tf
    return False
def is_verbose(argv: argparse.Namespace):
    return argv is not None and hasattr(argv, 'verbose') and argv.verbose


def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
    # FIXME: It doesn't work when -h is passed
    if 'help' in args and args['help']:
        show_mo_convert_help()
        return None, None
@ -701,9 +433,16 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
    # before arg parser deliver log_level requested by user
    init_logger('ERROR', False)
    argv = None
    # Minimize modifications among other places in case if multiple pieces are passed as input_model
    if python_api_used:
        if 'input_model' not in args:
            args['input_model'] = ()
        if isinstance(args['input_model'], (tuple, list)) and len(args['input_model']) == 1:
            args['input_model'] = args['input_model'][0]
    try:
        model_framework = None
        inp_model_is_object = input_model_is_object(args)
        inp_model_is_object = input_model_is_object(args['input_model']) if python_api_used else False

        if inp_model_is_object:
            model_framework = check_model_object(args)
            if model_framework == "pytorch":
@ -714,12 +453,13 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
                raise AssertionError(
                    "'example_inputs' argument is not recognized, maybe you meant to provide 'example_input'?")

                decoder = get_pytorch_decoder(args['input_model'], parse_input_shapes(args), example_inputs, args)
                get_pytorch_decoder(args['input_model'], example_inputs, args)
            if model_framework == "paddle":
                example_inputs = None
                if 'example_input' in args and args['example_input'] is not None:
                    example_inputs = args['example_input']

                # TODO: Check what example_outputs is and remove if not needed
                example_outputs = None
                if 'example_output' in args and args['example_output'] is not None:
                    example_outputs = args['example_output']
@ -727,45 +467,27 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
                                                                 example_outputs)
                pdmodel = paddle_runtime_converter.convert_paddle_to_pdmodel()
                args['input_model'] = pdmodel
                args['framework'] = model_framework

        update_args_for_saved_model_dir(args)

        argv = pack_params_to_args_namespace(args, cli_parser)
        argv.framework = model_framework
        argv.is_python_object = inp_model_is_object

        argv.feManager = FrontEndManager()
        frameworks = list(set(['tf', 'caffe', 'mxnet', 'kaldi', 'onnx'] + (get_available_front_ends(argv.feManager)
                                                                           if argv.feManager else [])))
        framework = argv.framework if hasattr(argv, 'framework') and argv.framework is not None else framework
        if framework is not None:
            assert framework in frameworks, "error: argument \"framework\": invalid choice: '{}'. " \
                                            "Expected one of {}.".format(framework, frameworks)
            setattr(argv, 'framework', framework)

        # send telemetry with params info
        send_params_info(argv, cli_parser)

        non_default_params = get_non_default_params(argv, cli_parser)
        check_legacy_args(non_default_params, python_api_used)
        argv.is_python_api_used = python_api_used

        if inp_model_is_object:
            argv.model_name = "model"
        if not hasattr(argv, "model_name") or argv.model_name is None:
            argv.model_name = get_model_name_from_args(argv)
            argv.output_model = "model"  # TODO: Consider removing
        if not hasattr(argv, "output_model") or argv.output_model is None:
            argv.output_model = get_model_name_from_args(argv)

        if model_framework is not None:
            if argv.framework is not None:
                if argv.framework != model_framework:
                    raise Error("Provided model does not correspond to provided framework. The provided "
                                "framework is {}, the model type is {} which is expected to be {} framework.".format(
                                    argv.framework,
                                    type(argv.input_model),
                                    model_framework))
            else:
                argv.framework = model_framework

        ov_model, legacy_path = driver(argv, {"conversion_parameters": non_default_params})
        ov_model = driver(argv, {"conversion_parameters": non_default_params})

        if inp_model_is_object and model_framework == "paddle":
            if paddle_runtime_converter:
@ -773,30 +495,22 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
        # add MO meta data to model
        ov_model.set_rt_info(get_rt_version(), "Runtime_version")
        ov_model.set_rt_info(str(legacy_path), "legacy_frontend")
        for key, value in non_default_params.items():
            ov_model.set_rt_info(str(value), ["conversion_parameters", str(key)])

        if silent_is_false(argv) or not python_api_used:
        if is_verbose(argv) or not python_api_used:
            if 'compress_to_fp16' in argv and argv.compress_to_fp16:
                print(get_compression_message())

            ov_update_message = get_ov_update_message()
            ov_api20_message = get_ov_api20_message()
            if ov_update_message is not None:
                print(ov_update_message)
            if ov_api20_message is not None and ov_model is not None:
                print(ov_api20_message)
            is_fallback = getattr(argv, 'is_fallback', False)
            if not argv.use_legacy_frontend and framework_is_tf(args, argv) and not is_fallback:
                # now TF FE is default frontend for TensorFlow models conversion
                print(get_tf_fe_message())

        send_conversion_result('success')
        return ov_model, argv

    except Exception as e:
        if silent_is_false(argv) or not python_api_used:
        if is_verbose(argv) or not python_api_used:
            if isinstance(e, (FileNotFoundError, NotADirectoryError)):
                log.error('File {} was not found'.format(str(e).split('No such file or directory:')[1]))
                log.debug(traceback.format_exc())
@ -810,17 +524,17 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
            log.error("-------------------------------------------------")
            log.error("----------------- INTERNAL ERROR ----------------")
            log.error("Unexpected exception happened.")
            log.error("Please contact Model Conversion API developers and forward the following information:")
            log.error("Please verify parameters and environment.")
            log.error("If you think this is a bug, please create new ticket here: ")
            log.error("https://github.com/openvinotoolkit/openvino/issues.")
            log.error("-------------- DETAILED INFORMATION -------------")
            log.error(str(e))
            log.error(traceback.format_exc())
            log.error("---------------- END OF BUG REPORT --------------")
            log.error("----------------- END OF REPORT -----------------")
            log.error("-------------------------------------------------")
            is_fallback = getattr(argv, 'is_fallback', False) if argv is not None else False
            if not argv.use_legacy_frontend and framework_is_tf(args, argv) and not is_fallback:
                print(get_try_legacy_fe_message())

        send_conversion_result('fail')
        if python_api_used:
            raise e.with_traceback(None)
        raise e
        else:
            return None, argv
@ -46,8 +46,3 @@ def classify_error_type(e):
    if m:
        return m.group(0)
    return "undefined"


def legacy_path_error(functionality_description):
    raise Exception("{}Please try to install openvino-dev and use convert_model() "
                    "from openvino.tools.mo.".format(functionality_description))
@ -16,24 +16,6 @@ def get_ov_update_message():
    return msg_fmt.format(link) if current_date >= expected_update_date else None


def get_ov_api20_message():
    link = "https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html"
    message = '[ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework ' \
              'input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, ' \
              'please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11.\n' \
              'Find more information about API v2.0 and IR v11 at {}'.format(link)

    return message


def get_tf_fe_message():
    link = "https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_TensorFlow_Frontend.html"
    message = '[ INFO ] IR generated by new TensorFlow Frontend is compatible only with API v2.0. Please make sure to use API v2.0.\n' \
              'Find more information about new TensorFlow Frontend at {}'.format(link)

    return message


def get_compression_message():
    link = "https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html"
    message = '[ INFO ] Generated IR will be compressed to FP16. ' \
@ -41,8 +23,3 @@ def get_compression_message():
              'by removing argument "compress_to_fp16" or set it to false "compress_to_fp16=False".\n' \
              'Find more information about compression to FP16 at {}'.format(link)
    return message


def get_try_legacy_fe_message():
    message = '[ INFO ] You can also try to install openvino-dev and use convert_model from openvino.tools.mo.\n'
    return message
@ -2,19 +2,18 @@
# SPDX-License-Identifier: Apache-2.0

def get_convert_model_help_specifics():
    from openvino.tools.ovc.cli_parser import CanonicalizeTransformationPathCheckExistenceAction, \
        CanonicalizePathCheckExistenceAction, CanonicalizeExtensionsPathCheckExistenceAction, \
        CanonicalizePathCheckExistenceIfNeededAction, readable_file_or_dir, readable_dirs_or_files_or_empty, \
        check_positive
    from openvino.tools.ovc.cli_parser import CanonicalizePathCheckExistenceAction, CanonicalizeExtensionsPathCheckExistenceAction, \
        readable_file_or_dir, readable_dirs_or_files_or_empty
    from openvino.tools.ovc.version import VersionChecker
    return {
        'input_model':
            {'description':
                 'Tensorflow*: a file with a pre-trained model '
                 '(binary or text .pb file after freezing). '
                 'Caffe*: a model proto file with model weights.', 'action': CanonicalizePathCheckExistenceAction,
                 'Input model file(s) from TensorFlow, ONNX, PaddlePaddle. '
                 'Use openvino.convert_model in Python to convert models from PyTorch.'
                 '',
             'action': CanonicalizePathCheckExistenceAction,
             'type': readable_file_or_dir,
             'aliases': {'-w', '-m'}},
             'aliases': {}},
        'input_shape':
            {'description':
                 'Input shape(s) that should be fed to an input node(s) '
@ -38,10 +37,11 @@ def get_convert_model_help_specifics():
                 'Alternatively, specify shapes with the --input option.'},
        'input':
            {'description':
                 'Quoted list of comma-separated input nodes names with '
                 'shapes, data types, and values for freezing. The order '
                 'of inputs in converted model is the same as order of '
                 'specified operation names. The shape and value are '
                 'Information for model input required for model conversion. '
                 'This is a comma separated list with optional '
                 'input names, shapes, data types, and values for freezing. '
                 'The order of inputs in converted model will match the order of '
                 'specified inputs. The shape and value are '
                 'specified as comma-separated lists. The data type of '
                 'input node is specified in braces and can have one of '
                 'the values: f64 (float64), f32 (float32), f16 '
@ -62,38 +62,6 @@ def get_convert_model_help_specifics():
                 '\"node_name2\" with the value [20,15] of the int32 type '
                 'and shape [2]: \n '
                 '\"0:node_name1[3,4],node_name2:1[2]{i32}->[20,15]\".'},
        'mean_values':
            {'description':
                 'Mean values to be used for the input image per '
                 'channel. Values to be provided in the (R,G,B) or '
                 '[R,G,B] format. Can be defined for desired input of '
                 'the model, for example: "--mean_values '
                 'data[255,255,255],info[255,255,255]". The exact '
                 'meaning and order of channels depend on how the '
                 'original model was trained.'},
        'scale_values':
            {'description':
                 'Scale values to be used for the input image per '
                 'channel. Values are provided in the (R,G,B) or [R,G,B] '
                 'format. Can be defined for desired input of the model, '
                 'for example: "--scale_values '
                 'data[255,255,255],info[255,255,255]". The exact '
                 'meaning and order of channels depend on how the '
                 'original model was trained. If both --mean_values and '
                 '--scale_values are specified, the mean is subtracted '
                 'first and then scale is applied regardless of the '
                 'order of options in command line.'},
        'source_layout':
            {'description':
                 'Layout of the input or output of the model in the '
                 'framework. Layout can be specified in the short form, '
                 'e.g. nhwc, or in complex form, e.g. \"[n,h,w,c]\". '
                 'Example for many names: \"in_name1([n,h,w,c]),in_name2('
                 'nc),out_name1(n),out_name2(nc)\". Layout can be '
                 'partially defined, \"?\" can be used to specify '
                 'undefined layout for one dimension, \"...\" can be used '
                 'to specify undefined layout for multiple dimensions, '
                 'for example \"?c??\", \"nc...\", \"n...c\", etc.'},
        'transform':
            {'description':
                 'Apply additional transformations. Usage: \"--transform '
@ -115,47 +83,19 @@ def get_convert_model_help_specifics():
                 'those that are placed at the default location, pass an empty string.',
             'action': CanonicalizeExtensionsPathCheckExistenceAction,
             'type': readable_dirs_or_files_or_empty},
        'transformations_config':
            {'description':
                 'Use the configuration file with transformations '
                 'description. Transformations file can be specified as '
                 'relative path from the current directory, as absolute '
                 'path or as a relative path from the mo root directory.',
             'action': CanonicalizeTransformationPathCheckExistenceAction},
        'counts':
            {'action': CanonicalizePathCheckExistenceIfNeededAction},
        'version':
            {'action': 'version',
             'version': 'Version of Model Optimizer is: {}'.format(VersionChecker().get_ie_version())},
        'scale':
            {'type': float,
             'aliases': {'-s'}},
        'batch':
            {'type': check_positive,
             'aliases': {'-b'}},
        'input_proto':
            {'aliases': {'-d'}},
        'log_level':
            {'choices': ['CRITICAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']}
             # FIXME: Why the following is not accessible from arg parser?
             'version': 'OpenVINO Model Converter (ovc) {}'.format(VersionChecker().get_ie_version())},
    }


# TODO: remove this when internal converting of params to string is removed
# TODO: remove this when internal converting of params to string is removed <-- DO IT
def get_to_string_methods_for_params():
    from openvino.tools.ovc.cli_parser import path_to_str_or_object, str_list_to_str, \
        mean_scale_value_to_str, source_target_layout_to_str, layout_param_to_str, transform_param_to_str, \
        extensions_to_str_or_extensions_class, batch_to_int, transformations_config_to_str
    from openvino.tools.ovc.cli_parser import path_to_str_or_object, str_list_to_str, extensions_to_str_or_extensions_class
    return {
        'input_model': path_to_str_or_object,
        'output': str_list_to_str,
        'mean_values': mean_scale_value_to_str,
        'scale_values': mean_scale_value_to_str,
        'source_layout': source_target_layout_to_str,
        'target_layout': source_target_layout_to_str,
        'layout': layout_param_to_str,
        'transform': transform_param_to_str,
        'extensions': extensions_to_str_or_extensions_class,
        'batch': batch_to_int,
        'transformations_config': transformations_config_to_str,
        'saved_model_tags': str_list_to_str
    }
@ -64,10 +64,10 @@ class TagFilter(log.Filter):
        return True  # if regex wasn't set print all logs


def init_logger(lvl: str, silent: bool):
def init_logger(lvl: str, verbose: bool):
    global handler_num
    log_exp = os.environ.get('MO_LOG_PATTERN')
    if silent:
    if not verbose:
        lvl = 'ERROR'
    fmt = LvlFormatter(lvl=lvl)
    handler = log.StreamHandler()
@ -89,72 +89,3 @@ def restore_logger_state(state: tuple):
    logger.setLevel(level)
    logger.filters = filters
    logger.handlers = handlers


def progress_bar(function: callable):
    """
    Decorator for model conversion pipeline progress display
    Works in combination with function: mo.utils.class_registration.apply_transform
    """

    def wrapper(*args, **kwargs):
        for arg in ['graph', 'curr_transform_num', 'num_transforms']:
            msg = 'Progress bar decorator is enabled for Model Conversion API transformation applying cycle only. ' \
                  'Argument `{}` {}'

            assert arg in kwargs, msg.format(arg, 'is missing')
            assert kwargs[arg] is not None, msg.format(arg, 'should not be None')

        if 'progress' in kwargs['graph'].graph['cmd_params'] and kwargs['graph'].graph['cmd_params'].progress:
            bar_len = 20
            total_replacers_count = kwargs['num_transforms']

            def progress(i):
                return int((i + 1) / total_replacers_count * bar_len)

            def percent(i):
                return (i + 1) / total_replacers_count * 100

            end = '' if not kwargs['graph'].graph['cmd_params'].stream_output else '\n'
            curr_i = kwargs['curr_transform_num']
            print('\rProgress: [{:{}}]{:>7.2f}% done'.format('.' * progress(curr_i), bar_len, percent(curr_i)), end=end)

            sys.stdout.flush()

        function(*args, **kwargs)

    return wrapper


def progress_printer(argv: Namespace):
    """
    A higher-order factory function returning a configurable callback displaying a progress bar
    Depending on the configuration stored in 'argv' the progress bar can be one-line, multi-line, or silent.
    """
    def _progress_bar(progress, total, completed, endline):
        bar_len = 20

        def dots():
            return '.' * int(progress * bar_len)

        print('\rProgress: [{:{}}]{:>7.2f}% done'.format(dots(), bar_len, progress * 100), end=endline)
        sys.stdout.flush()

    def no_progress_bar(progress, total, completed):
        """ A 'dummy' progressbar which doesn't print anything """
        pass

    def oneline_progress_bar(progress, total, completed):
        """ A callback that always prints the progress in the same line (mimics real GUI progress bar)"""
        _progress_bar(progress, total, completed, '')

    def newline_progress_bar(progress, total, completed):
        """ A callback that prints an updated progress bar in separate lines """
        _progress_bar(progress, total, completed, '\n')

    if "progress" in argv and argv.progress:
        if "stream_output" in argv and argv.stream_output:
            return newline_progress_bar
        else:
            return oneline_progress_bar
    else:
        return no_progress_bar
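# A hedged illustration (not part of the diff) of what the one-line callback above
# prints for 35% progress; the bar is 20 characters wide.
progress, bar_len = 0.35, 20
print('\rProgress: [{:{}}]{:>7.2f}% done'.format('.' * int(progress * bar_len), bar_len, progress * 100), end='')
# -> Progress: [.......             ]  35.00% done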
@ -21,13 +21,12 @@ def main():
    if ngraph_function is None:
        return 1

    output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
    model_path_no_ext = os.path.normpath(os.path.join(output_dir, argv.model_name))
    output_dir = os.getcwd()
    model_path_no_ext = os.path.normpath(os.path.join(output_dir, argv.output_model))
    model_path = model_path_no_ext + '.xml'

    serialize(ngraph_function, model_path.encode('utf-8'), model_path.replace('.xml', '.bin').encode('utf-8'))

    print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version()))
    print('[ SUCCESS ] XML file: {}'.format(model_path))
    print('[ SUCCESS ] BIN file: {}'.format(model_path.replace('.xml', '.bin')))
    return 0
@ -4,7 +4,6 @@
import argparse

from openvino.runtime import Model  # pylint: disable=no-name-in-module,import-error
from openvino.tools.ovc.cli_parser import parse_transform
from openvino.tools.ovc.moc_frontend.preprocessing import apply_preprocessing

@ -21,15 +20,14 @@ def moc_emit_ir(ngraph_function: Model, argv: argparse.Namespace):
    from openvino._offline_transformations import compress_quantize_weights_transformation  # pylint: disable=no-name-in-module,import-error
    compress_quantize_weights_transformation(ngraph_function)

    if argv.framework == "onnx":
    if argv.framework == "onnx":  # TODO: Consider removing
        # set OldApi map in IR to be executed via OV API 1.x and for parity with legacy MO
        params_with_custom_types = [] if argv.placeholder_data_types is None \
            else list(argv.placeholder_data_types.keys())
        apply_moc_legacy_transformations(ngraph_function, params_with_custom_types)

    apply_user_transformations(ngraph_function, parse_transform(argv.transform))

    if argv.compress_to_fp16:
    # TODO: Move compression to save_model at the level of main function where serialize is called
    if not argv.is_python_api_used and argv.compress_to_fp16:
        from openvino.tools.ovc.moc_frontend.offline_transformations import compress_model
        compress_model(ngraph_function)
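# A hedged illustration (not part of the diff): the new guard above compresses
# weights only on the CLI path. _should_compress is a hypothetical helper that
# mirrors that condition.
def _should_compress(is_python_api_used: bool, compress_to_fp16: bool) -> bool:
    return (not is_python_api_used) and compress_to_fp16

assert _should_compress(False, True) is True   # ovc command line: compress here
assert _should_compress(True, True) is False   # Python API: compression deferred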
@ -4,7 +4,6 @@
import argparse
from typing import List

from openvino.tools.ovc.cli_parser import parse_transform
from openvino.tools.ovc.error import Error
from openvino.runtime import Model  # pylint: disable=no-name-in-module,import-error

@ -117,7 +116,6 @@ def apply_offline_transformations(func: Model, argv: argparse.Namespace):

    params_with_custom_types = create_params_with_custom_types(argv.packed_user_shapes)
    apply_moc_legacy_transformations(func, params_with_custom_types)
    apply_user_transformations(func, parse_transform(argv.transform))

    if "compress_to_fp16" in argv and argv.compress_to_fp16:
        compress_model(func)
@ -45,19 +45,18 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
    :param: moc_front_end: Loaded Frontend for converting input model
    :return: converted nGraph function ready for serialization
    """
    input_checkpoint = getattr(argv, 'input_checkpoint', None)
    share_weights = getattr(argv, 'share_weights', True)
    if argv.input_model and input_checkpoint:

    share_weights = getattr(argv, 'share_weights', True)  # FIXME: Should be controlled by default value
    if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 2:
        # frozen format with v1 checkpoints
        input_model = moc_front_end.load([argv.input_model, argv.input_checkpoint], share_weights)
    elif argv.input_model:
        input_model = moc_front_end.load(argv.input_model, share_weights)
    elif argv.saved_model_dir:
        if argv.saved_model_tags:
            input_model = moc_front_end.load([argv.saved_model_dir, argv.saved_model_tags], share_weights)
        assert not hasattr(argv, 'saved_model_tags') or not argv.saved_model_tags
        input_model = moc_front_end.load([part for part in argv.input_model], share_weights)
    elif hasattr(argv, 'saved_model_tags') and argv.saved_model_tags:
        input_model = moc_front_end.load([argv.input_model, argv.saved_model_tags], share_weights)
    else:
        input_model = moc_front_end.load(argv.saved_model_dir, share_weights)
    elif argv.input_meta_graph:
        input_model = moc_front_end.load(argv.input_model, share_weights)

    '''elif argv.input_meta_graph:  # TODO: Cover this case
        input_model = moc_front_end.load(argv.input_meta_graph, share_weights)
        if argv.output:
            # Simulate original behavior with freezing model
@ -65,7 +64,7 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
            # need to simulate similar behavior with natively supported model
            outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
            input_model.override_all_outputs([x['node'] for x in outputs])
    '''
    argv.placeholder_shapes, argv.placeholder_data_types, argv.freeze_placeholder_with_value = convert_params_lists_to_dicts(
        input_model, argv.placeholder_shapes, argv.placeholder_data_types,
        argv.freeze_placeholder_with_value, argv.unnamed_freeze_placeholder_with_value)
@ -236,83 +235,6 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
    layout_values = update_layout_to_dict(model_inputs, argv.layout_values,
                                          lambda input_place: input_place.get_names())

    deferred_batch_names = []
    # set batch size for inputs with a static rank
    # for all other inputs, set it after shape deduction is performed during model conversion
    if argv.batch is not None and argv.batch > 0:
        log.debug('Setting batch size to {}'.format(argv.batch))
        frozen_input_names = list(freeze_placeholder.keys()) if freeze_placeholder else []
        for place in model_inputs:
            input_partial_shape = input_model.get_partial_shape(place)
            input_names = place.get_names()
            joined_name = ' '.join(place.get_names())
            assert len(input_names) > 0, "One input place has no names"

            # if this input is frozen, there is no need to set the batch
            is_frozen_input = len([name for name in input_names if name in frozen_input_names]) > 0
            if is_frozen_input:
                # skip the frozen input
                continue

            if not input_partial_shape.rank.is_static:
                # found input with dynamic rank, so have to repeat the batch setting after the model conversion
                deferred_batch_names += input_names
                continue

            batch_dim, is_default_index = get_dimension_index_by_label(input_partial_shape,
                                                                       place.get_names(), layout_values, 'N', 0)
            if batch_dim is None:
                # skip because no batch dimension exists in the input
                continue

            if is_default_index:
                # if the batch index is chosen by default, we need to ensure that its size equals -1, 0 or 1
                validate_batch_in_shape(shape_to_array(input_partial_shape), joined_name)

            assert batch_dim < input_partial_shape.rank.get_length(), \
                "Incorrect layout is specified for {}:" \
                " index of the batch dimension is out of range.".format(input_names[0])

            new_partial_shape = copy(input_partial_shape)
            new_partial_shape[batch_dim] = argv.batch

            log.debug('Input: {}, Old shape: {}, New shape: {}'.format(
                joined_name, input_partial_shape, new_partial_shape))
            input_model.set_partial_shape(place, new_partial_shape)

    ov_model = moc_front_end.convert(input_model)

    if argv.batch is not None and argv.batch > 0 and len(deferred_batch_names) > 0:
        # Frontend convert method can include reverse infer functionality that can deduce undefined input shapes
        # so try to repeat batch setting again
        reshape_dict = {}
        log.debug('Deferred batch setting to size {}'.format(argv.batch))
        is_batch_clarified = False
        for model_input in ov_model.inputs:
            input_name = model_input.any_name
            input_partial_shape = model_input.get_partial_shape()
            if input_name in deferred_batch_names and input_partial_shape.rank.is_static:
                # update input shape with the specified batch for input that originally has dynamic rank
                batch_dim, is_default_index = get_dimension_index_by_label(input_partial_shape,
                                                                           model_input.get_names(),
                                                                           layout_values, 'N', 0)
                if batch_dim is None:
                    continue

                if is_default_index:
                    # if the batch index is chosen by default, we need to ensure that its size equals -1, 0 or 1
                    validate_batch_in_shape(shape_to_array(input_partial_shape), input_name)

                assert batch_dim < input_partial_shape.rank.get_length(), \
                    "Incorrect layout is specified for {}: " \
                    "index of the batch dimension is out of range.".format(input_name)
                input_partial_shape[batch_dim] = argv.batch
                is_batch_clarified = True

            reshape_dict.update({input_name: input_partial_shape})

        if is_batch_clarified:
            # call reshape only if batch dimension for one of the input is clarified
            ov_model.reshape(reshape_dict)

    return ov_model
|
||||
|
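The deleted block above implemented the legacy "batch" option by rewriting input shapes before and after conversion. A minimal sketch of achieving the same effect from user code with the public API, assuming a hypothetical "model.xml" whose first dimension is the batch:

import openvino.runtime as ov

core = ov.Core()
model = core.read_model("model.xml")  # hypothetical model path
shape = model.input(0).get_partial_shape()
shape[0] = 8  # pin the batch dimension explicitly instead of using the removed "batch" option
model.reshape({model.input(0).any_name: shape})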
@ -14,52 +14,6 @@ from openvino.tools.ovc.error import Error
from openvino.tools.ovc.utils import refer_to_faq_msg


def update_mean_scale_to_dict(input_nodes: list, mean_scale_val, scale):
    """
    Internal function. Updates mean/scale values from array to dictionary
    :param: input_nodes Inputs of model
    :param: mean_scale_val Parsed 'mean_scale_val' object from command line arguments
    :param: scale Global scale factor for all inputs from scale command line arguments
    """
    if not isinstance(mean_scale_val, dict):
        if len(mean_scale_val) != len(input_nodes):
            raise Error('Numbers of inputs and mean/scale values do not match. ' + refer_to_faq_msg(61))
        data = copy(mean_scale_val)
        mean_scale_val = {}
        for idx, node in enumerate(input_nodes):
            names_list = list(node.get_tensor().get_names())
            names_list.sort()
            if not names_list:
                continue
            node_name = names_list[0]
            mean_scale_val.update(
                {
                    node_name: {
                        'mean': data[idx][0],
                        'scale': data[idx][1]
                    }
                }
            )

    if scale:
        for node in input_nodes:
            names_list = list(node.get_tensor().get_names())
            names_list.sort()
            if not names_list:
                continue
            node_name = names_list[0]
            old_val = mean_scale_val[node_name] if node_name in mean_scale_val else None
            mean_scale_val.update(
                {
                    node_name: {
                        'mean': old_val['mean'] if old_val and 'mean' in old_val else None,
                        'scale': scale
                    }
                }
            )
    return mean_scale_val


def check_keys_valid(ov_function: Model, dict_to_validate: dict, search_outputs: bool):
    """
    Internal function: checks if keys from cmd line arguments correspond to ov_function's inputs/outputs
@ -183,147 +137,6 @@ def find_channels_dimension(shape: PartialShape, num_channels: int, name: str, l
    }
    return layout_values


def guess_source_layouts_by_mean_scale(ov_function: Model, layout_values, mean_scale_values: dict):
    """
    Internal function. Try to guess source layout for input by its shape and/or framework
    :param: ov_function Original model
    :param: layout_values Existing source/target layout items specified by user
    :param: mean_scale_values Dictionary with mean/scale values defined for each argument
    :return: updated layout items with guessed layouts
    """
    for ms_name, mean_scale in mean_scale_values.items():
        num_channels_mean = len(mean_scale['mean']) if mean_scale['mean'] is not None else 0
        num_channels_scale = len(mean_scale['scale']) if hasattr(mean_scale['scale'], '__len__') else 0

        if num_channels_mean > 1 and \
                num_channels_scale > 1 and \
                num_channels_mean != num_channels_scale:
            raise Error('Mean/Scale values for {} have different sizes: {} {}'
                        .format(ms_name, num_channels_mean, num_channels_scale))

        need_guess_channels = num_channels_mean > 1 or num_channels_scale > 1
        if not need_guess_channels:  # Mean/scale is complex and needs 'channels' specified in layout
            continue

        num_channels = num_channels_mean if num_channels_mean > 1 else num_channels_scale

        for i in range(0, len(ov_function.inputs)):
            ov_input = ov_function.input(i)

            if not ov_function.get_parameters()[i].layout.empty:
                continue

            if ms_name not in ov_input.get_tensor().get_names():
                continue

            layout_item = None
            for name in ov_input.get_tensor().get_names():
                if name in layout_values:
                    layout_item = layout_values[name]
                    break

            if layout_item is not None:
                # User specified some layout, skip guessing
                continue

            # Guessing the layout is applicable only when the number of channels is 3
            if num_channels != 3:
                raise Error('Can\'t determine channels dimension for {}. '
                            'When number of mean/scale values is {} (not 3), '
                            'please specify layout for input manually'.format(ms_name, num_channels))

            layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(),
                                                    num_channels=num_channels,
                                                    name=ms_name,
                                                    layout_values=layout_values)
    return layout_values


def check_suitable_for_reverse(layout: Layout, ov_input):
    """
    Internal function. Checks if input with layout is suitable for reversing channels
    :param: layout Existing source/target layout items specified by user
    :param: ov_input Model's input
    :return: True if reverse channels can be applied to input
    """
    if not layout_helpers.has_channels(layout):
        return False
    if ov_input.get_partial_shape().rank.is_dynamic:
        return False

    c_idx = layout_helpers.channels_idx(layout)
    rank = ov_input.get_partial_shape().rank.get_length()
    if c_idx < 0:
        c_idx += rank
    if c_idx >= rank:
        raise Error('Layout {} for input {} is inconsistent with shape {}'.format(
            layout, ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape()))
    c_num = ov_input.get_partial_shape()[c_idx]
    return c_num.is_dynamic or c_num.get_length() == 3


def guess_source_layouts_for_reverse_channels(ov_function: Model, layout_values):
    """
    Internal function. Try to guess source layout for input by finding dimension with size=3 (RGB/BGR)
    Additionally checks existing layouts and detects suitable inputs for reversing of input channels
    :param: ov_function Original model
    :param: layout_values Existing source/target layout items specified by user
    :return: array with suitable parameters for reversing of input channels
    """
    all_params = []
    suitable_params = []
    for i in range(0, len(ov_function.inputs)):
        ov_input = ov_function.input(i)
        param_info = [ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape()]
        all_params.append(param_info)

        if not ov_function.get_parameters()[i].layout.empty:
            if check_suitable_for_reverse(ov_function.get_parameters()[i].layout, ov_input):
                suitable_params.append(param_info)
            continue

        layout_item = None
        first_name = ov_input.get_tensor().get_any_name()
        for name in ov_input.get_tensor().get_names():
            if name in layout_values:
                layout_item = layout_values[name]
                break

        if layout_item is not None:
            # RIC transformation is applied before changing layout, so only source_layout
            # should be checked (even if target_layout is also provided)
            if layout_item.get('source_layout'):
                if check_suitable_for_reverse(Layout(layout_item['source_layout']), ov_input):
                    suitable_params.append(param_info)
            continue

        try:
            layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(),
                                                    num_channels=3,
                                                    name=first_name,
                                                    layout_values=layout_values)
        except Error as e:
            log.debug('Reverse input channels guess did not succeed {}'.format(e))
        else:
            layout = layout_values[first_name].get('source_layout')
            if layout and check_suitable_for_reverse(Layout(layout), ov_input):
                suitable_params.append(param_info)

    if not len(suitable_params):
        raise Error('Network has {} inputs overall, but none of them are suitable for input channels reversing.\n'
                    'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic '
                    'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}'.format(
                        len(all_params), all_params))
    elif len(suitable_params) < len(all_params):
        log.error('Network has {} inputs overall, but only {} of them are suitable for input channels reversing.\n'
                  'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic '
                  'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}\n'
                  'Suitable inputs {}'.format(len(all_params), len(suitable_params), all_params, suitable_params),
                  extra={'is_warning': True})
    return suitable_params


def update_tensor_names_to_first_in_sorted_list(values_dict: dict, ov_function: Model):
    if not isinstance(values_dict, dict):
        return values_dict
@ -372,38 +185,14 @@ def apply_preprocessing(ov_function: Model, argv: argparse.Namespace):
    """
    prep = PrePostProcessor(ov_function)

    if 'mean_scale_values' in argv and argv.mean_scale_values:
        mean_scale_values = argv.mean_scale_values
    else:
        mean_scale_values = {}

    # mean_scale_values stores mean/scale values from the command line with the names set by the user.
    # For models with a single input, scale or mean may be unnamed, so the name is taken from the first
    # tensor name in the names list. This may lead to different naming of preprocessing params for a single
    # node and cause an error. To unify the naming of mean/scale values, user-provided names are renamed
    # here to the first tensor name from the sorted names list.
    mean_scale_values = update_tensor_names_to_first_in_sorted_list(mean_scale_values, ov_function)
    mean_scale_values = update_mean_scale_to_dict(input_nodes=ov_function.inputs,
                                                  mean_scale_val=mean_scale_values,
                                                  scale=argv.scale)
    # On return, mean_scale_values is a dictionary with input names as key and mean/scale pair as value
    # {'inputName': {'mean': [1., 2., 3.], 'scale': [2.]}}

    layout_values = {}
    if 'layout_values' in argv and argv.layout_values:
        layout_values = update_layout_to_dict(ov_function.inputs, argv.layout_values,
                                              lambda ov_input: ov_input.get_tensor().get_names())

    check_keys_valid(ov_function=ov_function, dict_to_validate=mean_scale_values, search_outputs=False)
    check_keys_valid(ov_function=ov_function, dict_to_validate=layout_values, search_outputs=True)

    layout_values = update_layout_is_input_flag(ov_function, layout_values)
    layout_values = guess_source_layouts_by_mean_scale(ov_function, layout_values, mean_scale_values)
    need_reverse = 'reverse_input_channels' in argv and argv.reverse_input_channels
    suitable_params_ric = []
    if need_reverse:
        suitable_params_ric = guess_source_layouts_for_reverse_channels(ov_function=ov_function,
                                                                        layout_values=layout_values)

    for node_name, layout_value in layout_values.items():
        if layout_value.get('source_layout'):
@ -417,20 +206,6 @@ def apply_preprocessing(ov_function: Model, argv: argparse.Namespace):
            else:
                prep.output(node_name).tensor().set_layout(Layout(layout_value['target_layout']))

    # Apply reverse_input_channels
    if need_reverse:
        for name, _ in suitable_params_ric:
            prep.input(name).preprocess().reverse_channels()
            log.debug('reverse_input_channels pre-processing applied to {}'.format(name))

    for node_name, node_mean_scale_values in mean_scale_values.items():
        # Apply mean first, then scale
        if node_mean_scale_values['mean'] is not None:
            prep.input(node_name).preprocess().mean(node_mean_scale_values['mean'])
        if node_mean_scale_values['scale'] is not None:
            prep.input(node_name).preprocess().scale(node_mean_scale_values['scale'])
        log.debug('Mean/Scale pre-processing applied to {}'.format(node_name))

    # Apply pre-processing builder to a function
    ov_function = prep.build()
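The mean/scale, layout, and reverse-channels handling removed here remains available through the public pre-processing API. A minimal, hedged sketch of the equivalent user-side code (the input name "data" and the model path are placeholders):

from openvino.preprocess import PrePostProcessor
from openvino.runtime import Core, Layout

core = Core()
model = core.read_model("model.xml")  # hypothetical model path
prep = PrePostProcessor(model)
prep.input("data").tensor().set_layout(Layout("NHWC"))  # source layout must name the channels dimension
prep.input("data").preprocess().reverse_channels()      # RGB <-> BGR
prep.input("data").preprocess().mean([123.675, 116.28, 103.53]).scale([58.395, 57.12, 57.375])
model = prep.build()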
@ -8,21 +8,20 @@ import numpy as np
from openvino.runtime import Tensor, Type, PartialShape
from openvino.runtime.utils.types import get_element_type_str

from openvino.tools.ovc.cli_parser import input_to_input_cut_info, input_shape_to_input_cut_info
from openvino.tools.ovc.cli_parser import input_to_input_cut_info
from openvino.tools.ovc.error import Error
from openvino.tools.ovc.moc_frontend.shape_utils import get_static_shape


def get_pytorch_decoder(model, input_shape, example_inputs, args):
def get_pytorch_decoder(model, example_inputs, args):
    try:
        from openvino.frontend.pytorch.decoder import TorchScriptPythonDecoder
    except Exception as e:
        log.error("PyTorch frontend loading failed")
        raise e
    inputs = prepare_torch_inputs(example_inputs, input_shape, args.get("input"), allow_none=True)
    inputs = prepare_torch_inputs(example_inputs, args.get("input"), allow_none=True)
    decoder = TorchScriptPythonDecoder(model, example_input=inputs)
    args['input_model'] = decoder
    args["framework"] = "pytorch"
    args["example_input"] = inputs

    return args
@ -169,7 +168,7 @@ def get_torch_dtype(dtype):
    raise Error(f"Unexpected data type for input. Supported torch.dtype, numpy.dtype, ov.Type and str. Got {type(dtype)}")


def prepare_torch_inputs(example_inputs, input_shape, input_info=None, allow_none=False):
def prepare_torch_inputs(example_inputs, input_info=None, allow_none=False):
    import torch
    inputs = None
    if example_inputs is not None:
@ -190,16 +189,15 @@ def prepare_torch_inputs(example_inputs, input_shape, input_info=None, allow_non
                inputs[name] = to_torch_tensor(tensor)
        else:
            inputs = to_torch_tensor(inputs)
    elif input_info is not None or input_shape is not None:
    elif input_info is not None:
        input_info = input_to_input_cut_info(input_info) or []
        input_shape_to_input_cut_info(input_shape, input_info)
        inputs = []
        inputs_with_names = {}
        for inp in input_info:
            shape = inp.shape
            if shape is None:
                if not allow_none:
                    raise Error("Please provide input_shape or example_input for all inputs converting PyTorch model.")
                    raise Error("Please provide shape in `input` or `example_input` for all inputs converting PyTorch model.")
                inputs = None
                break
            dtype = get_torch_dtype(inp.type)
@ -214,5 +212,5 @@ def prepare_torch_inputs(example_inputs, input_shape, input_info=None, allow_non
        inputs = inputs_with_names
    else:
        if not allow_none:
            raise Error("Please provide input_shape or example_input for converting PyTorch model.")
            raise Error("Please provide shapes in `input` or `example_input` for converting PyTorch model.")
    return inputs
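A hedged usage sketch of the reworked PyTorch path: shapes now come from `input` or `example_input` rather than a separate input_shape parameter. The torchvision model is just an illustrative stand-in:

import torch
import torchvision
from openvino.tools.ovc import convert_model

pt_model = torchvision.models.resnet18(weights=None).eval()
ov_model = convert_model(pt_model, example_input=torch.rand(1, 3, 224, 224))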
@ -4,7 +4,6 @@
import numpy as np
from openvino.runtime import PartialShape, Dimension  # pylint: disable=no-name-in-module,import-error
from openvino.tools.ovc.error import Error
from openvino.tools.ovc.cli_parser import get_placeholder_shapes, split_shapes


def get_static_shape(shape: [PartialShape, list, tuple], dynamic_value=None):
@ -63,40 +62,3 @@ def get_dynamic_dims(shape: [PartialShape, list, tuple]):
            dynamic_dims.append(idx)

    return dynamic_dims


def parse_input_shapes(argv):
    input_shapes = None
    if 'input_shape' in argv and argv['input_shape'] is not None:
        shapes = argv['input_shape']
        if isinstance(shapes, str):
            shapes = ["[{}]".format(x) for x in split_shapes(shapes)]
        if isinstance(shapes, list) or isinstance(shapes, tuple):
            input_shapes = []
            is_single_shape = False
            for shape in shapes:
                if isinstance(shape, str):
                    _, shape_tuple, _ = get_placeholder_shapes(argv_input=None, argv_input_shape=shape)
                    input_shapes.append(shape_tuple)
                    if is_single_shape:
                        raise Error("Incorrect format of shape.")
                elif isinstance(shape, int) or isinstance(shape, np.int64) or isinstance(shape, Dimension):
                    is_single_shape = True
                    input_shapes.append(shape)
                else:
                    input_shapes.append(shape)
            if is_single_shape:
                return [input_shapes]
            else:
                return input_shapes
        elif isinstance(shapes, PartialShape):
            return [shapes]
        else:
            try:
                import torch
                if isinstance(shapes, torch.Size):  # pylint: disable=no-member
                    return [shapes]
            except ImportError:
                raise Error("Unknown type of input shape {}.".format(type(shapes)))

    return input_shapes
@ -101,39 +101,6 @@ def validate_batch_in_shape(shape, layer_name: str):
                 'You can also specify batch dimension by setting "layout". \n\n')
                .format(layer_name, shape))


def deduce_legacy_frontend_by_namespace(argv):
    if not hasattr(argv, 'framework') or not argv.framework:
        if getattr(argv, 'saved_model_dir', None) or getattr(argv, 'input_meta_graph', None):
            argv.framework = 'tf'
        elif getattr(argv, 'input_symbol', None) or getattr(argv, 'pretrained_model_name', None):
            argv.framework = 'mxnet'
        elif getattr(argv, 'input_proto', None):
            argv.framework = 'caffe'
        elif argv.input_model is None:
            raise Error('Path to input model is required: use "input_model".')
        else:
            argv.framework = guess_framework_by_ext(argv.input_model)

    return map(lambda x: argv.framework == x, ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx'])


def guess_framework_by_ext(input_model_path: str) -> str:
    if re.match(r'^.*\.caffemodel$', input_model_path):
        return 'caffe'
    elif re.match(r'^.*\.pb$', input_model_path):
        return 'tf'
    elif re.match(r'^.*\.pbtxt$', input_model_path):
        return 'tf'
    elif re.match(r'^.*\.params$', input_model_path):
        return 'mxnet'
    elif re.match(r'^.*\.nnet$', input_model_path):
        return 'kaldi'
    elif re.match(r'^.*\.mdl$', input_model_path):
        return 'kaldi'
    elif re.match(r'^.*\.onnx$', input_model_path):
        return 'onnx'


def get_ir_version():
    """
    Default IR version.
@ -9,8 +9,7 @@ from contextlib import redirect_stdout
from unittest.mock import patch

from openvino.tools.ovc.main import main
from openvino.tools.ovc.get_ov_update_message import get_tf_fe_message, get_compression_message
from openvino.tools.ovc.get_ov_update_message import get_try_legacy_fe_message
from openvino.tools.ovc.get_ov_update_message import get_compression_message


def arg_parse_helper(input_model,
@ -18,41 +17,19 @@ def arg_parse_helper(
                     use_new_frontend,
                     input_model_is_text,
                     framework,
                     compress_to_fp16=False,
                     freeze_placeholder_with_value=None,
                     tensorflow_object_detection_api_pipeline_config=None):
                     compress_to_fp16=False):
    path = os.path.dirname(__file__)
    input_model = os.path.join(path, "test_models", input_model)

    return argparse.Namespace(
        input_model=input_model,
        use_legacy_frontend=use_legacy_frontend,
        use_new_frontend=use_new_frontend,
        framework=framework,
        input_model_is_text=input_model_is_text,
        log_level='INFO',
        silent=True,
        model_name=None,
        verbose=False,
        output_model=None,
        transform=[],
        scale=None,
        output=None,
        input=None,
        input_shape=None,
        batch=None,
        input_checkpoint=None,
        saved_model_dir=None,
        input_meta_graph=None,
        saved_model_tags=None,
        output_dir='.',
        mean_values=(),
        scale_values=(),
        layout={},
        source_layout={},
        target_layout={},
        freeze_placeholder_with_value=freeze_placeholder_with_value,
        data_type=None,
        tensorflow_custom_operations_config_update=None,
        tensorflow_object_detection_api_pipeline_config=tensorflow_object_detection_api_pipeline_config,
        compress_to_fp16=compress_to_fp16,
        extensions=None
    )
@ -63,60 +40,11 @@ class TestInfoMessagesTFFE(unittest.TestCase):
                  return_value=arg_parse_helper(input_model="model_int32.pbtxt",
                                                use_legacy_frontend=False, use_new_frontend=True,
                                                framework=None, input_model_is_text=True))
    def test_api20_only(self, mock_argparse):
        f = io.StringIO()
        with redirect_stdout(f):
            main()
            std_out = f.getvalue()
        tf_fe_message_found = get_tf_fe_message() in std_out
        assert tf_fe_message_found

    @patch('openvino.tools.ovc.convert_impl.driver', side_effect=Exception('MESSAGE'))
    def run_fail_tf_fe(self, mock_driver):
        from openvino.tools.ovc import convert_model
        path = os.path.dirname(__file__)
        convert_model(os.path.join(path, "test_models", "model_int32.pbtxt"), silent=False)

    def test_suggest_legacy_fe(self):
        f = io.StringIO()
        with redirect_stdout(f):
            try:
                self.run_fail_tf_fe()
            except:
                pass
        std_out = f.getvalue()
        assert get_try_legacy_fe_message() in std_out


class TestInfoMessagesTFFEWithFallback(unittest.TestCase):
    @patch('argparse.ArgumentParser.parse_args',
           return_value=arg_parse_helper(input_model="model_switch_merge.pbtxt",
                                         use_legacy_frontend=False, use_new_frontend=False,
                                         framework=None, input_model_is_text=True,
                                         freeze_placeholder_with_value="is_training->False"))
    def test_tf_fe_message_fallback(self, mock_argparse):
        f = io.StringIO()
        with redirect_stdout(f):
            main()
            std_out = f.getvalue()
        tf_fe_message_found = get_try_legacy_fe_message() in std_out
        assert tf_fe_message_found, 'TF FE Info message is found for the fallback case'

    @patch('argparse.ArgumentParser.parse_args',
           return_value=arg_parse_helper(input_model="model_int32.pbtxt",
                                         use_legacy_frontend=False, use_new_frontend=True,
                                         compress_to_fp16=True,
                                         framework=None, input_model_is_text=True,
                                         tensorflow_object_detection_api_pipeline_config="config.yml"))
    def test_tf_fe_message_fallback(self, mock_argparse):
        f = io.StringIO()
        with redirect_stdout(f):
            main()
            std_out = f.getvalue()
        tf_fe_message_found = "The provided option \"tensorflow_object_detection_api_pipeline_config\" " \
                              "refers to legacy functionality. Please try to install openvino-dev and " \
                              "use convert_model() from openvino.tools.mo." in std_out
        assert not tf_fe_message_found, 'TF FE Info message is found for the fallback case'
        convert_model(os.path.join(path, "test_models", "model_int32.pbtxt"), verbose=True)


class TestInfoMessagesCompressFP16(unittest.TestCase):
@ -13,18 +13,12 @@ from openvino.tools.ovc.convert import convert_model

@generator
class TestMoFreezePlaceholderTFFE(unittest.TestCase):
    def basic(self, input_model, argv_input, inputs, dtype, expected, freeze_placeholder_with_value=None,
              input_shape=None, only_conversion=False, input_model_is_text=True, use_new_frontend=True,
              use_legacy_frontend=False):
    def basic(self, input_model, argv_input, inputs, dtype, expected, only_conversion=False):
        path = os.path.dirname(__file__)
        input_model = os.path.join(path, "test_models", input_model)

        try:
            model = convert_model(input_model, input=argv_input,
                                  freeze_placeholder_with_value=freeze_placeholder_with_value,
                                  input_shape=input_shape, input_model_is_text=input_model_is_text,
                                  use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend,
                                  framework="tf")
            model = convert_model(input_model, input=argv_input)
        except Exception as ex:
            self.fail("Model conversion failed due to error: {}".format(ex))

@ -128,31 +122,27 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
                None
            ),
            (
                None,
                "in1,in2,cond->False",
                {"in1": np.array([2.0, 4.0, 6.0], dtype=np.float32),
                 "in2": np.array([1.0, 3.0, 5.0], dtype=np.float32)},
                np.array([2, 4, 6], dtype=np.float32),
                np.float32,
                "cond->False",
                None,
                True  # file a bug to investigate why compilation of this model hangs
            ),
            # case: input_shape + freeze_placeholder_with_value
            (
                None,
                "in2,in1->[2.0 4.0 6.0],cond->True",
                {"in2": np.array([1.0, 3.0, 5.0], dtype=np.float32)},
                np.array([2, 4, 6], dtype=np.float32),
                np.float32,
                "in1->[2.0 4.0 6.0],cond->True",
                "[3]",
                False
            ),
        ],
    )
    def test_bool2(self, input_freezing_value, inputs, expected,
                   dtype=None, freeze_placeholder_with_value=None, input_shape=None, only_conversion=False):
        self.basic("model_bool2.pbtxt", input_freezing_value, inputs, dtype, expected, freeze_placeholder_with_value,
                   input_shape, only_conversion)
                   dtype=None, only_conversion=False):
        self.basic("model_bool2.pbtxt", input_freezing_value, inputs, dtype, expected,
                   only_conversion)

    @generate(
        *[
@ -173,10 +163,9 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
        ],
    )
    def test_cutting_fp32(self, input_freezing_value, inputs, expected,
                          dtype=None, freeze_placeholder_with_value=None, input_shape=None, only_conversion=False):
                          dtype=None, only_conversion=False):
        self.basic("model_three_inputs.pbtxt", input_freezing_value, inputs, dtype, expected,
                   freeze_placeholder_with_value,
                   input_shape, only_conversion, True)
                   only_conversion)

    @generate(
        *[
@ -204,11 +193,9 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
        ],
    )
    def test_placeholder_with_default(self, inputs, inputs_data, expected,
                                      dtype=None, freeze_placeholder_with_value=None, input_shape=None,
                                      only_conversion=False):
                                      dtype=None, only_conversion=False):
        self.basic("placeholder_with_default.pbtxt", inputs, inputs_data, dtype, expected,
                   freeze_placeholder_with_value,
                   input_shape, only_conversion, True)
                   only_conversion)

    @generate(
        *[
@ -229,29 +216,12 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
        ],
    )
    def test_freeze_placeholder_with_unknown_rank(self, inputs, inputs_data, expected,
                                                  dtype=None, freeze_placeholder_with_value=None, input_shape=None,
                                                  only_conversion=False):
                                                  dtype=None, only_conversion=False):
        self.basic("mul_with_unknown_rank_y.pbtxt", inputs, inputs_data, dtype, expected,
                   freeze_placeholder_with_value,
                   input_shape, only_conversion, True)

    def test_conversion_failure_fallback_use_new_frontend(self):
        with self.assertRaisesRegex(Exception,
                                    "\[TensorFlow Frontend\] Internal error, no translator found for operation\(s\)\: "
                                    "Enter\, Exit\, LoopCond\, Merge\, NextIteration\, Switch\, TensorArrayGatherV3\, "
                                    "TensorArraySizeV3\, TensorArrayV3"):
            self.basic("ctc_model_based.pbtxt", None, None, None, None,
                       None, None, True, True, True, False)

    @unittest.skip("88349: Fix auto-pruning in legacy FE")
    def test_conversion_model_oneshot_iterator_use_legacy_frontend(self):
        self.basic("model_oneshot_iterator.pbtxt", None, None, None, None,
                   None, None, True, True, False, True)
                   only_conversion)

    def test_conversion_model_oneshot_iterator_default(self):
        self.basic("model_oneshot_iterator.pbtxt", None, None, None, None,
                   None, None, True, True, False, False)
        self.basic("model_oneshot_iterator.pbtxt", None, None, None, None, True)

    @generate(
        *[
@ -272,9 +242,7 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
    @unittest.skip("109220: Use generating script for this test model instead of Git LFS")
    def test_conversion_model_with_non_standard_extension(self, input_freezing_value, inputs, expected,
                                                          dtype):
        self.basic("model_fp32.frozen", input_freezing_value, inputs, dtype, expected, only_conversion=False,
                   input_model_is_text=False, use_new_frontend=True,
                   use_legacy_frontend=False)
        self.basic("model_fp32.frozen", input_freezing_value, inputs, dtype, expected, only_conversion=False)

    @unittest.skip("109220: Make TF FE to return the error")
    def test_conversion_dir_model(self):
@ -282,8 +250,7 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
                                    "Internal error or inconsistent input model: the frontend supports "
                                    "only frozen binary protobuf format."):
            self.basic(".", None, None, None, None,
                       only_conversion=True, input_model_is_text=False, use_new_frontend=True,
                       use_legacy_frontend=False)
                       only_conversion=True)

    @generate(
        *[
@ -300,8 +267,7 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
        ],
    )
    def test_conversion_pbtxt_model_with_inference(self, inputs, expected, dtype):
        self.basic("model_with_if.pbtxt", None, inputs, dtype, expected, only_conversion=False,
                   input_model_is_text=False, use_new_frontend=True, use_legacy_frontend=False)
        self.basic("model_with_if.pbtxt", None, inputs, dtype, expected, only_conversion=False)

    @generate(
        *[
@ -311,18 +277,16 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
                "x[2,3]",
                {"x": np.array([[12, 13, 10], [11, 14, 16]], dtype=np.float32)},
                np.array([[12, 13, 10], [11, 14, 16]], dtype=np.float32),
                np.float32, True, False,
                np.float32
            ),
            (
                "model_mul_with_undefined_constant.pbtxt",
                "x[2]",
                {"x": np.array([11, -12], dtype=np.int32)},
                np.array([0, 0], dtype=np.int32),
                np.int32, True, False,
                np.int32
            ),
        ],
    )
    def test_conversion_model_with_undefined_constant(self, model_name, argv_input, inputs, expected, dtype,
                                                      use_new_frontend, use_legacy_frontend):
        self.basic(model_name, argv_input, inputs, dtype, expected, only_conversion=False,
                   input_model_is_text=True, use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend)
    def test_conversion_model_with_undefined_constant(self, model_name, argv_input, inputs, expected, dtype):
        self.basic(model_name, argv_input, inputs, dtype, expected, only_conversion=False)
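A hedged sketch of the simplified test-side call: values can still be frozen through the `input` string (as in the test data above), while the legacy freeze_placeholder_with_value and input_shape parameters are gone. The file name is a placeholder:

from openvino.tools.ovc import convert_model

ov_model = convert_model("model_bool2.pbtxt", input="in2,in1->[2.0 4.0 6.0],cond->True")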
@ -12,31 +12,9 @@ from openvino.tools.ovc.convert import convert_model

@generator
class TestMoFreezePlaceholderTFFE(unittest.TestCase):
    @generate(
        *[
            # the default frontend
            (
                False, False, None
            ),
            (
                False, False, "tf"
            ),
            # new frontend
            (
                True, False, None
            ),
            (
                True, False, "tf"
            ),
        ],
    )
    def test_conversion_fake_pb_model(self, use_new_frontend, use_legacy_frontend, framework):
        with self.assertRaisesRegex(Exception,
                                    "Internal error or inconsistent input model: the frontend supports frozen formats"
                                    " \(.pb and .pbtxt\), SavedModel and MetaGraph \(.meta\), and v1 checkpoints."):
    def test_conversion_fake_pb_model(self):
        # TODO: Should FEs give detailed report why a model is rejected and should we print out the report?
        with self.assertRaisesRegex(Exception, "Cannot recognize input model."):
            path = os.path.dirname(__file__)
            input_model = os.path.join(path, "test_models", "fake.pb")

            convert_model(input_model,
                          use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend,
                          framework=framework)
            convert_model(input_model)
@ -36,7 +36,6 @@ class TestBasicConversion(unittest.TestCase):
        checkpoint_byte_stream = self.prepare_checkpoint_v1()
        ckpt_file.write(bytes(checkpoint_byte_stream))
        ckpt_file.close()
        basic_check(input_model="model_with_variable_v1.pbtxt", argv_input=None,
        basic_check(input_model=["model_with_variable_v1.pbtxt", ckpt_file.name], argv_input=None,
                    input_data={'input1': np.array([[1]], dtype=np.int64)},
                    expected_dtype=np.int64, expected_value=np.array([[14108583]], dtype=np.int64),
                    use_new_frontend=True, use_legacy_frontend=False, input_checkpoint=ckpt_file.name)
                    expected_dtype=np.int64, expected_value=np.array([[14108583]], dtype=np.int64))
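A hedged sketch of the new multi-part model passing exercised here: instead of a separate input_checkpoint parameter, the graph file and the v1 checkpoint are passed together as a list (both file names are placeholders):

from openvino.tools.ovc import convert_model

ov_model = convert_model(["model_with_variable_v1.pbtxt", "checkpoint_v1.ckpt"])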
@ -12,86 +12,3 @@ from openvino.runtime import Model
from openvino.runtime import PartialShape, Dimension
from openvino.tools.ovc.convert import convert_model
from openvino.tools.ovc.error import Error


@generator
class TestConversionWithBatchAndLayout(unittest.TestCase):
    def basic_check(self, model_name: str, batch: int, layout: str, refs_shapes: dict):
        path = os.path.dirname(__file__)
        input_model = os.path.join(path, "test_models", model_name)
        ov_model = convert_model(input_model, batch=batch, layout=layout)

        for ov_input in ov_model.inputs:
            input_name = ov_input.any_name
            assert input_name in refs_shapes, "No reference input shape is found for {}".format(input_name)
            input_shape = ov_input.get_partial_shape()
            ref_shape = refs_shapes[input_name]
            assert input_shape == ref_shape, "Incorrect shape for {} input:" \
                                             " expected shape - {}, actual shape - {}".format(input_name, ref_shape,
                                                                                              input_shape)

    @unittest.skip("Fix importing of openvino.test_utils in Jenkins")
    def test_basic_model_no_layout(self):
        from openvino.test_utils import compare_functions
        path = os.path.dirname(__file__)
        input_model = os.path.join(path, "test_models", "model_fp32.pbtxt")
        ov_model = convert_model(input_model)

        # compare with the reference graph
        param1 = opset11.parameter([2, 2], name="in1", dtype=np.float32)
        param2 = opset11.parameter([2, 2], name="in2", dtype=np.float32)
        add = opset11.add(param1, param2, name="add")
        ref_model = Model(add, [param1, param2])
        flag, msg = compare_functions(ov_model, ref_model, compare_tensor_names=False)
        assert flag, msg

    @generate(
        *[
            (
                "model_fp32.pbtxt", 5, "in1(cn),in2(cn)",
                {"in1": PartialShape([2, 5]), "in2": PartialShape([2, 5])},
            ),
            (
                "model_fp32.pbtxt", 9, "in1(nc),in2(nc)",
                {"in1": PartialShape([9, 2]), "in2": PartialShape([9, 2])},
            ),
            (
                "model_fp32.pbtxt", 7, "in1(?c),in2(?c)",
                {"in1": PartialShape([2, 2]), "in2": PartialShape([2, 2])},
            ),
        ],
    )
    def test_basic_model_with_layout(self, model_name: str, batch: int, layout: str, refs_shapes: dict):
        self.basic_check(model_name, batch, layout, refs_shapes)

    @generate(
        *[
            (
                "model_with_convolution_dynamic_rank.pbtxt", 7, "x(n???),kernel(????)",
                {"x": PartialShape([7, Dimension.dynamic(), Dimension.dynamic(), Dimension.dynamic()]),
                 "kernel": PartialShape([2, 2, 3, 1])},
            ),
            (
                "model_with_convolution_dynamic_rank.pbtxt", 3, "x(???n),kernel(??n?)",
                {"x": PartialShape([Dimension.dynamic(), Dimension.dynamic(), Dimension.dynamic(), 3]),
                 "kernel": PartialShape([2, 2, 3, 1])},
            ),
        ],
    )
    def test_model_with_convolution_dynamic_rank(self, model_name: str, batch: int, layout: str, refs_shapes: dict):
        self.basic_check(model_name, batch, layout, refs_shapes)

    @generate(
        *[
            (
                "model_fp32.pbtxt", 17, "",
                {},
            ),
        ],
    )
    def test_model_expected_failure(self, model_name: str, batch: int, layout: str, refs_shapes: dict):
        # try to override batch size by default index (without specifying layout)
        with self.assertRaisesRegex(Error,
                                    "When you use \"batch\" option, Model Conversion API applies its value to the first "
                                    "element of the shape if it is equal to -1, 0 or 1\."):
            self.basic_check(model_name, batch, layout, refs_shapes)
@ -9,17 +9,15 @@ from openvino.runtime import Core
from openvino.tools.ovc.convert import convert_model


def basic_check(input_model, argv_input, input_data, expected_dtype, expected_value, freeze_placeholder_with_value=None,
                input_shape=None, only_conversion=False, input_model_is_text=True, use_new_frontend=True,
                use_legacy_frontend=False, extensions=None, input_checkpoint=None):
def basic_check(input_model, argv_input, input_data, expected_dtype, expected_value,
                only_conversion=False, input_model_is_text=True, use_new_frontend=True, extensions=None):
    path = os.path.dirname(__file__)
    if isinstance(input_model, (tuple, list)):
        input_model = tuple(os.path.join(path, "test_models", part) for part in input_model)
    else:
        input_model = os.path.join(path, "test_models", input_model)

    ov_model = convert_model(input_model, input=argv_input,
                             freeze_placeholder_with_value=freeze_placeholder_with_value,
                             input_shape=input_shape, input_model_is_text=input_model_is_text,
                             use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend,
                             framework="tf", extensions=extensions, input_checkpoint=input_checkpoint)
    ov_model = convert_model(input_model, input=argv_input, extensions=extensions)

    if only_conversion:
        return ov_model
@ -6,10 +6,11 @@ import tempfile
from pathlib import Path

from generator import generator, generate
from openvino.runtime import serialize, InputCutInfo, LayoutMap
from openvino.runtime import serialize
from openvino.tools.ovc import InputCutInfo

from unit_tests.ovc.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry
from utils import create_onnx_model, save_to_onnx
from unit_tests.ovc.convert.utils import create_onnx_model, save_to_onnx


@generator
@ -57,11 +58,10 @@ class ConvertImportMOTest(UnitTestWithMockedTelemetry):
    @generate(*[
        ({}),
        ({'input': InputCutInfo(name='LeakyRelu_out', shape=None, type=None, value=None)}),
        ({'layout': {'input': LayoutMap(source_layout='NCHW', target_layout='NHWC')}}),
    ])
    # Checks convert_model import from openvino.tools.ovc
    def test_import(self, params):
        from openvino.runtime import convert_model
        from openvino.tools.ovc import convert_model

        with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir:
            model = create_onnx_model()
@ -73,7 +73,7 @@ class ConvertImportMOTest(UnitTestWithMockedTelemetry):
        assert os.path.exists(out_xml)

    def test_input_model_path(self):
        from openvino.runtime import convert_model
        from openvino.tools.ovc import convert_model

        with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir:
            model = self.create_onnx_model()
@ -87,14 +87,14 @@ class ConvertImportMOTest(UnitTestWithMockedTelemetry):


    def test_unnamed_input_model(self):
        from openvino.runtime import convert_model
        from openvino.tools.ovc import convert_model
        with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir:
            model = self.create_onnx_model()
            model_path = save_to_onnx(model, tmpdir)
            out_xml = os.path.join(tmpdir, "model.xml")

            ov_model = convert_model(model_path)
            serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8'))
            #serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8'))

            #TODO: check that model is correct
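A hedged sketch of the basic round trip these tests exercise: convert an ONNX file with the relocated ovc API and save the result as OpenVINO IR ("model.onnx" is a placeholder path):

from openvino.runtime import serialize
from openvino.tools.ovc import convert_model

ov_model = convert_model("model.onnx")
serialize(ov_model, "model.xml", "model.bin")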
@ -7,7 +7,8 @@ from pathlib import Path

from generator import generator
from openvino.runtime import get_version as get_rt_version
from openvino.runtime import serialize, convert_model
from openvino.runtime import serialize
from openvino.tools.ovc import convert_model

from unit_tests.ovc.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry
from utils import save_to_onnx
@ -58,7 +59,6 @@ class MetaDataTest(UnitTestWithMockedTelemetry):
        def ref_meta_data():
            return {
                'Runtime_version': get_rt_version(),
                'legacy_frontend': "False",
                'conversion_parameters': {
                    'input_model': Path.joinpath(Path("DIR"), Path("model.onnx")),
                }
@ -2,60 +2,13 @@
# SPDX-License-Identifier: Apache-2.0

import numpy as np
from openvino.runtime import Layout, PartialShape, Dimension, Shape, Type
from openvino.runtime import Layout, Dimension

from openvino.runtime import InputCutInfo, LayoutMap
from openvino.tools.ovc.cli_parser import mean_scale_value_to_str, \
    transform_param_to_str, str_list_to_str, source_target_layout_to_str, layout_param_to_str
from openvino.tools.ovc.cli_parser import str_list_to_str
from unit_tests.ovc.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry


class TestConvertingConvertArgumentsToString(UnitTestWithMockedTelemetry):
    def test_mean_scale_value_to_str(self):
        values = [0.5, 1.3, 0.67]
        self.assertTrue(mean_scale_value_to_str(values) == "[0.5,1.3,0.67]")

        values = {"input": [0.5, 1.3, 0.67]}
        self.assertTrue(mean_scale_value_to_str(values) == "input[0.5,1.3,0.67]")

        values = {"input1": [0.5, 1.3, 0.67], "input2": [4.2, 6.7, 3.15], "input3": [0.757, 4.6, 7.3]}
        self.assertTrue(mean_scale_value_to_str(values) ==
                        "input1[0.5,1.3,0.67],input2[4.2,6.7,3.15],input3[0.757,4.6,7.3]")

        self.assertRaises(Exception, mean_scale_value_to_str, **{"value": {("a", "b"): [0.5, 1.3, 0.67]}})
        self.assertRaises(Exception, mean_scale_value_to_str, **{"value": {"name": Dimension(1)}})
        self.assertRaises(Exception, mean_scale_value_to_str, **{"value": Dimension(1)})

    def test_transform_param_to_str(self):
        transform = 'MakeStateful'
        self.assertTrue(transform_param_to_str(transform) == "MakeStateful")

        transform1 = ('LowLatency2', {'use_const_initializer': False})
        self.assertTrue(transform_param_to_str(transform1) ==
                        "LowLatency2[use_const_initializer=False]")

        transform2 = ('MakeStateful', {'param_res_names': {
            'input_name_1': 'output_name_1', 'input_name_2': 'output_name_2'}})
        self.assertTrue(transform_param_to_str(transform2) ==
                        "MakeStateful[param_res_names={\'input_name_1\':\'output_name_1\',"
                        "\'input_name_2\':\'output_name_2\'}]")

        transform = [transform1, transform2]

        self.assertTrue(transform_param_to_str(transform) == "LowLatency2[use_const_initializer=False],"
                                                             "MakeStateful[param_res_names={"
                                                             "\'input_name_1\':\'output_name_1\',"
                                                             "\'input_name_2\':\'output_name_2\'}]")

        self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2',
                                                                          {'use_const_initializer': False},
                                                                          "param")})
        self.assertRaises(Exception, transform_param_to_str, **{"value": (("a", "b"), {})})
        self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2', Dimension(1))})
        self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2',
                                                                          {('a', 'b'): False})})
        self.assertRaises(Exception, transform_param_to_str, **{"value": Dimension(1)})

    def test_str_list_to_str(self):
        list_str = ["data1", "data2", "data3"]
        self.assertTrue(str_list_to_str(list_str) == "data1,data2,data3")
@ -65,44 +18,3 @@ class TestConvertingConvertArgumentsToString(UnitTestWithMockedTelemetry):

        self.assertRaises(Exception, str_list_to_str, **{"values": [int, 1]})
        self.assertRaises(Exception, str_list_to_str, **{"values": Dimension(1)})

    def test_source_target_layout_to_str(self):
        layout = {"input1": Layout("nhwc"), "input2": Layout("n??"), "input3": "nchw"}
        self.assertTrue(source_target_layout_to_str(layout) == "input1([N,H,W,C]),input2([N,?,?]),input3(nchw)")

        self.assertRaises(Exception, source_target_layout_to_str, **{"value": {"op": Dimension(1)}})
        self.assertRaises(Exception, source_target_layout_to_str, **{"value": {("a", "b"): Layout("nhwc")}})
        self.assertRaises(Exception, source_target_layout_to_str, **{"value": Dimension(1)})

    def test_layout_param_to_str_to_str(self):
        layout = {"input1": Layout("nhwc"), "input2": Layout("n??"), "input3": "nchw"}
        self.assertTrue(layout_param_to_str(layout) == "input1([N,H,W,C]),input2([N,?,?]),input3(nchw)")

        layout_map1 = LayoutMap(source_layout=Layout("n??"), target_layout=None)
        layout_map2 = LayoutMap(source_layout=Layout("nhwc"), target_layout=("nchw"))
        layout_map3 = LayoutMap(source_layout="abc", target_layout="cab")

        layout = {"input1": layout_map1, "input2": layout_map2, "input3": layout_map3, "input4": Layout("nhwc"),
                  "input5": "n?"}

        self.assertTrue(layout_param_to_str(layout) == "input1([N,?,?]),input2([N,H,W,C]->nchw),"
                                                       "input3(abc->cab),input4([N,H,W,C]),input5(n?)")

        self.assertRaises(Exception, layout_param_to_str, **{"value": {"op": Dimension(1)}})
        self.assertRaises(Exception, layout_param_to_str, **{"value": {("a", "b"): Layout("nhwc")}})
        self.assertRaises(Exception, layout_param_to_str, **{"value": Dimension(1)})

        layout = ["nhwc", "[n,c]"]
        self.assertTrue(layout_param_to_str(layout) == "nhwc,[n,c]")

        layout = ["abc->cab", "..nc"]
        self.assertTrue(layout_param_to_str(layout) == "abc->cab,..nc")

        layout_map1 = LayoutMap(source_layout=Layout("n??"), target_layout=None)
        layout = [layout_map1, "..nc"]
        self.assertTrue(layout_param_to_str(layout) == "[N,?,?],..nc")

        layout_map2 = LayoutMap(source_layout=Layout("nhwc"), target_layout=("nchw"))
        layout_map3 = LayoutMap(source_layout="abc", target_layout="cab")
        layout = [layout_map2, layout_map3]
        self.assertTrue(layout_param_to_str(layout) == "[N,H,W,C]->nchw,abc->cab")
File diff suppressed because it is too large