Introduce JSON model analysis feature on the new path (#9618)

This commit is contained in:
Mateusz Bencer 2022-01-18 11:18:19 +01:00 committed by GitHub
parent a8381f71f9
commit 5fe228bc14
5 changed files with 256 additions and 0 deletions

View File

@@ -837,6 +837,7 @@ openvino/tools/mo/mo_onnx.py
openvino/tools/mo/mo_paddle.py
openvino/tools/mo/mo_tf.py
openvino/tools/mo/moc_frontend/__init__.py
openvino/tools/mo/moc_frontend/analysis.py
openvino/tools/mo/moc_frontend/extractor.py
openvino/tools/mo/moc_frontend/pipeline.py
openvino/tools/mo/moc_frontend/serialize.py

View File

@@ -0,0 +1,45 @@
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import json

from openvino.runtime import PartialShape, Model, Type  # pylint: disable=no-name-in-module,import-error
from openvino.runtime.utils.types import get_dtype


def json_model_analysis_dump(framework_model: Model):
    def dump_partial_shape(shape: PartialShape):
        if shape.rank.is_dynamic:
            return 'None'
        return [dim.get_length() if dim.is_static else 0 for dim in shape]

    def dump_element_type(ov_type: Type):
        try:
            return str(get_dtype(ov_type))
        except:
            return 'None'

    json_dump = {}
    json_dump['inputs'] = {}
    for param in framework_model.get_parameters():
        param_name = param.get_friendly_name()
        json_dump['inputs'][param_name] = {}
        json_dump['inputs'][param_name]['shape'] = dump_partial_shape(param.get_partial_shape())
        json_dump['inputs'][param_name]['data_type'] = dump_element_type(param.get_element_type())
        json_dump['inputs'][param_name]['value'] = 'None'  # not supported in 22.1

    json_dump['intermediate'] = {}
    # TODO: extend model analysis dump for operations with body graphs (If, Loop, and TensorIterator)
    for op in framework_model.get_ordered_ops():
        for out_idx in range(op.get_output_size()):
            output = op.output(out_idx)
            tensor_name = output.get_any_name()
            json_dump['intermediate'][tensor_name] = {}
            json_dump['intermediate'][tensor_name]['shape'] = dump_partial_shape(output.get_partial_shape())
            json_dump['intermediate'][tensor_name]['data_type'] = dump_element_type(output.get_element_type())
            json_dump['intermediate'][tensor_name]['value'] = 'None'  # not supported in 22.1

    json_model_analysis_print(json_dump)


def json_model_analysis_print(json_dump: str):
    print(json_dump)
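
For orientation, a minimal usage sketch (not part of this commit) that exercises the new dump directly through the ONNX frontend, mirroring the moc_pipeline change below; the FrontEndManager calls and the test_model.onnx file name are taken from elsewhere in this commit, and the printed values assume the small Add model defined in the tests below.

# Sketch (assumption, not part of this commit): decode an ONNX model without
# applying user settings and dump the analysis JSON.
from openvino.frontend import FrontEndManager  # pylint: disable=no-name-in-module,import-error
from openvino.tools.mo.moc_frontend.analysis import json_model_analysis_dump

fem = FrontEndManager()
onnx_fe = fem.load_by_framework('onnx')
input_model = onnx_fe.load('test_model.onnx')  # small Add model created by the tests below

# decode() keeps the model as authored (no user shapes/types applied yet)
framework_model = onnx_fe.decode(input_model)
json_model_analysis_dump(framework_model)
# Expected print (a Python dict): inputs 'in1'/'in2' with shape [1, 2] and
# data_type 'float32', plus an 'intermediate' entry for every tensor in the graph.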

View File

@@ -4,9 +4,13 @@
import argparse
import logging as log
from typing import List
import sys
from os import environ
from openvino.tools.mo.moc_frontend.analysis import json_model_analysis_dump
from openvino.tools.mo.moc_frontend.extractor import fe_user_data_repack
from openvino.tools.mo.middle.passes.infer import validate_batch_in_shape
from openvino.tools.mo.utils.class_registration import get_enabled_and_disabled_transforms
from openvino.runtime import Dimension, PartialShape # pylint: disable=no-name-in-module,import-error
from openvino.frontend import FrontEnd, InputModel, NotImplementedFailure, Place # pylint: disable=no-name-in-module,import-error
@@ -57,6 +61,14 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
            log.warn('Could not add an additional name to a tensor pointed to by \'{}\'. Details: {}'.format(
                new_input['input_name'], str(e)))

    enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
    if 'ANALYSIS_JSON_PRINT' in enabled_transforms:
        # NOTE: model analysis is performed before applying the user's settings (input shapes, etc.)
        framework_model = moc_front_end.decode(input_model)
        json_model_analysis_dump(framework_model)
        # the model is not processed further in JSON analysis mode
        sys.exit(0)

    inputs_equal = True
    if user_shapes:
        inputs_equal = check_places_are_same(input_model.get_inputs(), user_shapes)
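
The new branch is gated through MO_ENABLED_TRANSFORMS (the tests below set it to ANALYSIS_JSON_PRINT in setUp). A hedged end-to-end sketch follows; the mo console script and the --use_new_frontend flag are assumed from the 2022.1 command line and are not part of this diff.

# Sketch (assumption): run Model Optimizer with the analysis gate enabled and
# capture the printed dict from stdout.
import os
import subprocess

env = dict(os.environ, MO_ENABLED_TRANSFORMS='ANALYSIS_JSON_PRINT')
result = subprocess.run(['mo', '--input_model', 'test_model.onnx', '--use_new_frontend'],
                        env=env, capture_output=True, text=True, check=True)
print(result.stdout)  # the analysis dict; MO exits via sys.exit(0) before producing an IR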

View File

@@ -68,3 +68,12 @@ def test_mo_fallback_test():
    status = subprocess.run(args, env=os.environ)
    assert not status.returncode


def test_mo_model_analysis():
    setup_env()
    args = [sys.executable, '-m', 'pytest',
            os.path.join(os.path.dirname(__file__), 'utils/test_mo_model_analysis_actual.py'), '-s']
    status = subprocess.run(args, env=os.environ)
    assert not status.returncode

View File

@@ -0,0 +1,189 @@
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import unittest
from unittest.mock import patch, Mock

import onnx
from onnx.helper import make_graph, make_model, make_tensor_value_info
import os
from os import environ
import json
import argparse

from openvino.tools.mo.main import prepare_ir
from openvino.frontend import FrontEndManager  # pylint: disable=no-name-in-module,import-error
from openvino.tools.mo.moc_frontend.analysis import json_model_analysis_dump

try:
    import openvino_telemetry as tm
except ImportError:
    import openvino.tools.mo.utils.telemetry_stub as tm
def base_args_config():
    args = argparse.Namespace()
    args.feManager = FrontEndManager()
    args.extensions = None
    args.use_legacy_frontend = False
    args.use_new_frontend = True
    args.framework = 'onnx'
    args.model_name = None
    args.input_model = None
    args.silent = True
    args.transform = []
    args.legacy_ir_generation = False
    args.scale = None
    args.output = None
    args.input = None
    args.input_shape = None
    args.batch = None
    args.mean_values = None
    args.scale_values = None
    args.output_dir = os.getcwd()
    args.freeze_placeholder_with_value = None
    args.transformations_config = None
    args.disable_fusing = None
    args.finegrain_fusing = None
    args.disable_gfusing = None
    args.disable_resnet_optimization = None
    args.enable_concat_optimization = None
    args.static_shape = None
    args.disable_weights_compression = None
    args.reverse_input_channels = None
    args.data_type = None
    args.layout = None
    args.source_layout = None
    args.target_layout = None
    args.frontend_defaults = {
        'onnx': 'legacy',
        'tf': 'legacy'
    }
    return args
class TestMoFallback(unittest.TestCase):
    def setUp(self):
        environ.update({'MO_ENABLED_TRANSFORMS': 'ANALYSIS_JSON_PRINT'})
        tm.Telemetry.__init__ = Mock(return_value=None)
        tm.Telemetry.send_event = Mock()

        self.models = {}
        add = onnx.helper.make_node("Add", inputs=["in1", "in2"], outputs=["add_out"])
        input_tensors = [
            make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (1, 2)),
            make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (1, 2)),
        ]
        output_tensors = [
            make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (1, 2)),
        ]
        graph = make_graph([add], "test_graph", input_tensors, output_tensors)
        model = make_model(graph, producer_name="MO tests",
                           opset_imports=[onnx.helper.make_opsetid("", 13)])
        self.models["test_model.onnx"] = model

        input_tensors_2 = [
            make_tensor_value_info("in1", onnx.TensorProto.INT64, (1, 'dyn_dim', 3)),
            make_tensor_value_info("in2", onnx.TensorProto.INT64, None),
            make_tensor_value_info("in3", onnx.TensorProto.INT64, ()),
        ]
        output_tensors_2 = [
            make_tensor_value_info("mul_out", onnx.TensorProto.FLOAT, None),
        ]
        mul = onnx.helper.make_node("Mul", inputs=["add_out", "in3"], outputs=["mul_out"])
        graph_2 = make_graph([add, mul], "test_graph_2", input_tensors_2, output_tensors_2)
        model_2 = make_model(graph_2, producer_name="MO tests",
                             opset_imports=[onnx.helper.make_opsetid("", 13)])
        self.models["test_model_2.onnx"] = model_2

        split_1 = onnx.helper.make_node("Split", inputs=["add_out"],
                                        outputs=["out1", "out2"], axis=0)
        split_2 = onnx.helper.make_node("Split", inputs=["mul_out"],
                                        outputs=["out3", "out4"], axis=0)
        output_tensors_3 = [
            make_tensor_value_info("out1", onnx.TensorProto.FLOAT, 'dyn_dim'),
            make_tensor_value_info("out2", onnx.TensorProto.FLOAT, 'dyn_dim'),
            make_tensor_value_info("out3", onnx.TensorProto.FLOAT, 'dyn_dim'),
            make_tensor_value_info("out4", onnx.TensorProto.FLOAT, 'dyn_dim'),
        ]
        graph_3 = make_graph([add, mul, split_1, split_2], "test_graph_3", input_tensors_2, output_tensors_3)
        model_3 = make_model(graph_3, producer_name="MO tests",
                             opset_imports=[onnx.helper.make_opsetid("", 13)])
        self.models["test_model_3.onnx"] = model_3

        for name, model in self.models.items():
            onnx.save(model, name)

    def tearDown(self):
        del environ['MO_ENABLED_TRANSFORMS']
        for name in self.models.keys():
            os.remove(name)
    @patch('openvino.tools.mo.moc_frontend.analysis.json_model_analysis_print')
    def test_model(self, json_print):
        args = base_args_config()
        args.input_model = "test_model.onnx"

        with patch('sys.exit') as exit_mock:  # do not exit execution
            prepare_ir(args)

        result = json_print.call_args.args[0]

        assert 'inputs' in result
        assert result['inputs'] == json.loads('{"in1": {"shape": [1, 2], "data_type": "float32", "value": "None"}, \
                                                "in2": {"shape": [1, 2], "data_type": "float32", "value": "None"}}')

        assert 'intermediate' in result
        assert result['intermediate'] == json.loads('{"in1": {"shape": [1, 2], "data_type": "float32", "value": "None"}, \
                                                      "in2": {"shape": [1, 2], "data_type": "float32", "value": "None"}, \
                                                      "add_out": {"shape": "None", "data_type": "None", "value": "None"}}')

    @patch('openvino.tools.mo.moc_frontend.analysis.json_model_analysis_print')
    def test_model_with_dyn_shapes(self, json_print):
        args = base_args_config()
        args.input_model = "test_model_2.onnx"

        with patch('sys.exit') as exit_mock:  # do not exit execution
            prepare_ir(args)

        result = json_print.call_args.args[0]

        assert 'inputs' in result
        print(result['inputs'])
        assert result['inputs'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \
                                                "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \
                                                "in3": {"shape": [], "data_type": "int64", "value": "None"}}')

        assert 'intermediate' in result
        assert result['intermediate'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \
                                                      "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \
                                                      "in3": {"shape": [], "data_type": "int64", "value": "None"}, \
                                                      "mul_out": {"shape": "None", "data_type": "None", "value": "None"}, \
                                                      "add_out": {"shape": "None", "data_type": "None", "value": "None"}}')

    @patch('openvino.tools.mo.moc_frontend.analysis.json_model_analysis_print')
    def test_multi_outputs_model(self, json_print):
        args = base_args_config()
        args.input_model = "test_model_3.onnx"

        with patch('sys.exit') as exit_mock:  # do not exit execution
            prepare_ir(args)

        result = json_print.call_args.args[0]

        assert 'inputs' in result
        assert result['inputs'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \
                                                "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \
                                                "in3": {"shape": [], "data_type": "int64", "value": "None"}}')

        assert 'intermediate' in result
        assert result['intermediate'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \
                                                      "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \
                                                      "in3": {"shape": [], "data_type": "int64", "value": "None"}, \
                                                      "mul_out": {"shape": "None", "data_type": "None", "value": "None"}, \
                                                      "add_out": {"shape": "None", "data_type": "None", "value": "None"}, \
                                                      "out1": {"shape": "None", "data_type": "None", "value": "None"}, \
                                                      "out2": {"shape": "None", "data_type": "None", "value": "None"}, \
                                                      "out3": {"shape": "None", "data_type": "None", "value": "None"}, \
                                                      "out4": {"shape": "None", "data_type": "None", "value": "None"}}')