Removed exception for BytesIO objects in convert_model() (#18318)

* Removed exception for BytesIO objects, added test.

* Small correction.

* Corrected imports.

* Apply suggestions from code review

Co-authored-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Removed wrong comment.

---------

Co-authored-by: Roman Kazantsev <roman.kazantsev@intel.com>
Anastasiia Pnevskaia, 2023-07-03 16:04:35 +02:00 (committed by GitHub)
parent 9dd27dd810
commit 5838685bd5
2 changed files with 101 additions and 24 deletions
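In short, after this change `convert_model()` accepts an ONNX model serialized into an in-memory `io.BytesIO` buffer, where it previously raised an exception directing users to `use_legacy_frontend=True`. A minimal usage sketch (assuming the `openvino.tools.mo.convert_model` entry point as in the 2023.x API, and an `onnx.ModelProto` named `onnx_model` built elsewhere):

```python
import io
import onnx
from openvino.tools.mo import convert_model

# Serialize an ONNX model into memory rather than onto disk.
buf = io.BytesIO()
onnx.save(onnx_model, buf)  # onnx_model: an onnx.ModelProto (assumed built elsewhere)

# Previously this raised "ONNX frontend does not support input model as
# BytesIO object"; now the buffer is forwarded to the ONNX frontend.
ov_model = convert_model(buf)
```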


@@ -0,0 +1,82 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import io

import numpy as np
import openvino.runtime as ov
import pytest
from openvino.runtime import Model

from common.mo_convert_test_class import CommonMOConvertTest


def make_graph_proto_model():
    import onnx
    from onnx import helper
    from onnx import TensorProto

    shape = [2, 3, 4]

    input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)

    node_def = onnx.helper.make_node(
        'LeakyRelu',
        inputs=['input'],
        outputs=['LeakyRelu_data'],
        alpha=0.1
    )
    node_def2 = onnx.helper.make_node(
        'Elu',
        inputs=['LeakyRelu_data'],
        outputs=['output'],
        alpha=0.1
    )

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [node_def, node_def2],
        'test_model',
        [input],
        [output],
    )

    # Create the model (ModelProto)
    onnx_net = helper.make_model(graph_def, producer_name='test_model')
    return onnx_net


def create_ref_model(shape):
    # Reference OpenVINO graph: PReLU with a constant slope is equivalent to
    # ONNX LeakyRelu, followed by Elu with the same alpha.
    param1 = ov.opset8.parameter(shape, dtype=np.float32)
    slope_const = ov.opset8.constant([0.1], dtype=np.float32)
    prelu = ov.opset8.prelu(param1, slope=slope_const)
    elu = ov.opset8.elu(prelu, alpha=np.float32(0.1))
    parameter_list = [param1]
    return Model([elu], parameter_list, "test")


def create_bytes_io():
    import onnx

    onnx_model = make_graph_proto_model()

    # Serialize the ONNX model into an in-memory buffer instead of a file.
    file_like_object = io.BytesIO()
    onnx.save(onnx_model, file_like_object)

    ref_model = create_ref_model([2, 3, 4])
    return file_like_object, ref_model, {}


class TestMoConvertONNX(CommonMOConvertTest):
    test_data = [create_bytes_io]

    @pytest.mark.parametrize("create_model", test_data)
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_mo_convert_onnx(self, create_model, ie_device, precision, ir_version,
                             temp_dir):
        fw_model, graph_ref, mo_params = create_model()

        test_params = {'input_model': fw_model}
        if mo_params is not None:
            test_params.update(mo_params)
        self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False)
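The test above relies on two operator identities: ONNX `LeakyRelu(alpha)` matches OpenVINO `PRelu` with a constant scalar slope, and `Elu(alpha)` is the same function in both frameworks. A short numpy sketch of the function both graphs compute (illustrative only, not part of the commit):

```python
import numpy as np

x = np.random.randn(2, 3, 4).astype(np.float32)

# LeakyRelu(alpha=0.1): identical to PRelu with a scalar slope of 0.1
leaky = np.where(x > 0, x, np.float32(0.1) * x)

# Elu(alpha=0.1): alpha * (exp(x) - 1) on the negative side
expected = np.where(leaky > 0, leaky, np.float32(0.1) * (np.exp(leaky) - 1.0))
```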


@@ -49,30 +49,25 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
     :param: moc_front_end: Loaded Frontend for converting input model
     :return: converted nGraph function ready for serialization
     """
-    if isinstance(argv.input_model, io.BytesIO):
-        raise Exception("ONNX frontend does not support input model as BytesIO object. "
-                        "Please use use_legacy_frontend=True to convert the model.")
-    else:
-        input_checkpoint = getattr(argv, 'input_checkpoint', None)
-        if argv.input_model and input_checkpoint:
-            # frozen format with v1 checkpoints
-            input_model = moc_front_end.load([argv.input_model, argv.input_checkpoint])
-        elif argv.input_model:
-            # frozen model without v1 checkpoints
-            input_model = moc_front_end.load(argv.input_model)
-        elif argv.saved_model_dir:
-            if argv.saved_model_tags:
-                input_model = moc_front_end.load([argv.saved_model_dir, argv.saved_model_tags])
-            else:
-                input_model = moc_front_end.load(argv.saved_model_dir)
-        elif argv.input_meta_graph:
-            input_model = moc_front_end.load(argv.input_meta_graph)
-        if argv.output:
-            # Simulate original behavior with freezing model
-            # While freezing we do a cutting of model, to keep similar behavior we
-            # need to simulate similar behavior with natively supported model
-            outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
-            input_model.override_all_outputs([x['node'] for x in outputs])
+    input_checkpoint = getattr(argv, 'input_checkpoint', None)
+    if argv.input_model and input_checkpoint:
+        # frozen format with v1 checkpoints
+        input_model = moc_front_end.load([argv.input_model, argv.input_checkpoint])
+    elif argv.input_model:
+        input_model = moc_front_end.load(argv.input_model)
+    elif argv.saved_model_dir:
+        if argv.saved_model_tags:
+            input_model = moc_front_end.load([argv.saved_model_dir, argv.saved_model_tags])
+        else:
+            input_model = moc_front_end.load(argv.saved_model_dir)
+    elif argv.input_meta_graph:
+        input_model = moc_front_end.load(argv.input_meta_graph)
+    if argv.output:
+        # Simulate original behavior with freezing model
+        # While freezing we do a cutting of model, to keep similar behavior we
+        # need to simulate similar behavior with natively supported model
+        outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
+        input_model.override_all_outputs([x['node'] for x in outputs])
     argv.placeholder_shapes, argv.placeholder_data_types, argv.freeze_placeholder_with_value = convert_params_lists_to_dicts(
         input_model, argv.placeholder_shapes, argv.placeholder_data_types,
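The simplified pipeline passes `argv.input_model` straight to `moc_front_end.load()`, so a `BytesIO` input now takes the same path as a file name, with no special casing. A condensed sketch of that dispatch (names taken from the diff; obtaining the frontend via `FrontEndManager` is an assumption about the surrounding setup):

```python
import io
from openvino.frontend import FrontEndManager

fem = FrontEndManager()
moc_front_end = fem.load_by_framework("onnx")  # assumed way to obtain the frontend

# onnx_model: an onnx.ModelProto assumed built elsewhere
model_buf = io.BytesIO(onnx_model.SerializeToString())

input_model = moc_front_end.load(model_buf)  # no BytesIO special case anymore
ov_model = moc_front_end.convert(input_model)
```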