Remove example_output for ovc and ovc.convert_model (#19079)

* Removed 'example_output' from ovc and ovc.convert_model; 'output' is now used for this purpose

* Update tools/ovc/openvino/tools/ovc/convert.py

* Update tools/ovc/openvino/tools/ovc/convert_impl.py

* Reverted MO parts not affected by the removal of example_output

* Fix PDPD convert_model tests

---------

Co-authored-by: Xiuchuan Zhai <xiuchuan.zhai@intel.com>
Sergey Lyalin 2023-08-15 15:14:25 +04:00 committed by GitHub
parent 66345ed9fc
commit bf607a1238
6 changed files with 16 additions and 21 deletions


@@ -97,7 +97,7 @@ class TestMoConvertPaddle(CommonMOConvertTest):
     def test_mo_import_from_memory_paddle_fe(self, create_model, ie_device, precision, ir_version,
                                               temp_dir):
         fw_model, graph_ref, mo_params = create_model(temp_dir)
-        test_params = {'input_model': fw_model, 'use_new_frontend': True}
+        test_params = {'input_model': fw_model}
         if mo_params is not None:
             test_params.update(mo_params)
         test_params.update({'use_convert_model_from_mo': True})


@@ -77,7 +77,7 @@ def create_paddle_static_module(tmp_dir):
     pd_model, x, y = make_pd_static_graph_model(shape)
     ref_model = make_ref_graph_model(shape)
-    return pd_model, ref_model, {"example_input": [x], "example_output": [y]}
+    return pd_model, ref_model, {"example_input": [x], "output": [y]}

 def create_paddle_hapi_module(tmp_dir):
     shape = [2,3,4]
@@ -97,7 +97,7 @@ class TestMoConvertPaddle(CommonMOConvertTest):
     def test_mo_import_from_memory_paddle_fe(self, create_model, ie_device, precision, ir_version,
                                               temp_dir):
         fw_model, graph_ref, mo_params = create_model(temp_dir)
-        test_params = {'input_model': fw_model, 'use_new_frontend': True}
+        test_params = {'input_model': fw_model}
         if mo_params is not None:
             test_params.update(mo_params)
         self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False)


@@ -21,9 +21,6 @@ def convert_model(
     extension: [str, pathlib.Path, list, Any] = None,
     verbose: bool = False,
     share_weights: bool = True,
-
-    # PaddlePaddle-specific parameters:
-    example_output: Any = None,  # TODO: Consider removing
 ) -> Model:
     """
     Converts the model from original framework to OpenVino Model.
@@ -72,8 +69,10 @@ def convert_model(
     :param output:
         The name of the output operation of the model or list of names. For TensorFlow*,
-        do not add :0 to this name.The order of outputs in converted model is the
-        same as order of specified operation names.
+        do not add :0 to this name. The order of outputs in converted model is the
+        same as the order of specified operation names. For a PaddlePaddle model represented
+        as a Python object, you can specify outputs as PaddlePaddle Python objects or
+        a list of such objects.
     :param example_input:
         Sample of model input in original framework.
         For PyTorch it can be torch.Tensor.
@@ -92,10 +91,6 @@ def convert_model(
         runtime object, then original memory regions allocated in the original model
         are reused for weights in the converted model.
-
-    PaddlePaddle-specific parameters:
-    :param example_output:
-        Sample of model output in original framework. For PaddlePaddle it can be Paddle Variable.
     Returns:
         openvino.runtime.Model
     """


@@ -424,12 +424,14 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used):
        if 'example_input' in args and args['example_input'] is not None:
            example_inputs = args['example_input']
-       # TODO: Check what example_outputs is and remove if not needed
-       example_outputs = None
-       if 'example_output' in args and args['example_output'] is not None:
-           example_outputs = args['example_output']
+       outputs = None
+       if 'output' in args and args['output'] is not None:
+           # Once the temporary PDPD model is generated, 'output' can be dropped:
+           # swapping outputs and args['output'] resets argv.output to `None`,
+           # which avoids applying the same `output` again later in the conversion.
+           outputs, args['output'] = args['output'], outputs
        paddle_runtime_converter = paddle_frontend_converter(args['input_model'], example_inputs,
-                                                            example_outputs)
+                                                            outputs)
        pdmodel = paddle_runtime_converter.convert_paddle_to_pdmodel()
        args['input_model'] = pdmodel
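
The swap above does two things at once: it captures the user-supplied outputs for building the temporary PDPD model and resets args['output'] to None so the later conversion stage does not apply the same output cut a second time. A self-contained sketch of just that idiom (values are placeholders, not from the repository):

# Standalone illustration of the swap idiom; names mirror the diff above.
args = {"input_model": "<paddle model object>", "output": ["relu_0.tmp_0"]}

outputs = None
if 'output' in args and args['output'] is not None:
    # After the swap, 'outputs' holds the user's value and args['output'] is None.
    outputs, args['output'] = args['output'], outputs

assert outputs == ["relu_0.tmp_0"]
assert args['output'] is None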


@@ -63,7 +63,7 @@ class paddle_frontend_converter:
        elif isinstance(self.model, paddle.fluid.executor.Executor):
            if self.outputs is None:
                raise RuntimeError(
-                   "Model is static. Saving inference model needs 'outputs' before saving. Please specify 'example_output' for this model"
+                   "Model is static. Saving inference model needs 'outputs' before saving. Please specify 'output' for this model"
                )
            paddle.static.save_inference_model(self.model_name, self.inputs, self.outputs, self.model)
        else:
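
For context on why the static (Executor) branch insists on outputs: paddle.static.save_inference_model needs explicit fetch variables. A standalone sketch mirroring the call in the diff above (the toy model is illustrative, not code from this repository):

# Why outputs are required in the static case: save_inference_model takes
# (path_prefix, feed_vars, fetch_vars, executor), so fetch_vars must be known.
import paddle

paddle.enable_static()
x = paddle.static.data(name="x", shape=[2, 3, 4], dtype="float32")
y = paddle.mean(x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())

paddle.static.save_inference_model("static_mean_model", [x], [y], exe)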


@@ -442,7 +442,6 @@ class TestConvertModelParamsParsing(unittest.TestCase):
        ref_params = {
            'Optional parameters:': {'input_model', 'input', 'output', 'example_input',
                                     'extension', 'verbose', 'share_weights'},
-           'PaddlePaddle-specific parameters:': {'example_output'},
        }

        params = get_mo_convert_params()
@@ -454,8 +453,7 @@ class TestConvertModelParamsParsing(unittest.TestCase):
        for group_name, params in ref_params.items():
            for param_name in params:
                param_name = '--' + param_name
-               if group_name == 'PaddlePaddle-specific parameters:' or \
-                       param_name in ['--input_model', '--share_weights', '--example_input']:
+               if param_name in ['--input_model', '--share_weights', '--example_input']:
                    assert param_name not in cli_parser._option_string_actions
                else:
                    assert param_name in cli_parser._option_string_actions