Preserve input/output order for ONNX. (#9352)
* Preserve input/output indices for ONNX. * Fixed checks. * Fixed the case of multiple outputs of a node before Result. * Added a test for multiple tensor names before Result. * Fixed multiple tensor names before Result. * Added order alignment for user input/output. * Extended for the case of input names in the Parameter tensor list. * Fixed unit tests. * Corrected help. * Small correction. * Code refactoring. * Temporarily reverted refactor. * Fixed wrong changes. * Fixed wrong changes. * Returned reverted refactoring. * Removed inputs_list from serializing.
This commit is contained in:
@@ -2,15 +2,16 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import itertools
|
||||
import numpy as np
|
||||
import os
|
||||
import re
|
||||
import warnings
|
||||
import xml.etree.ElementTree as ET
|
||||
from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
from common.constants import test_device, test_precision
|
||||
from common.layer_utils import IEInfer
|
||||
from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine
|
||||
|
||||
from common.utils.common_utils import generate_ir
|
||||
from common.utils.parsers import mapping_parser
|
||||
|
||||
@@ -113,6 +114,31 @@ class CommonLayerTest:
|
||||
mapping_dict=mapping_dict, framework_eps=fw_eps), \
|
||||
"Comparing with Framework failed: ie_res={}; framework_res={}.".format(infer_res, fw_res)
|
||||
|
||||
if len(inputs_dict.keys()) > 1 or len(infer_res.keys()) > 1:
|
||||
tree = ET.parse(path_to_xml)
|
||||
# findall returns elements in document order, this order should be the same as
|
||||
# order of inputs/outputs in original model
|
||||
inputs_ie = [child for child in tree.findall('.//layer[@type="Parameter"]')]
|
||||
outputs_ie = [child for child in tree.findall('.//layer[@type="Result"]')]
|
||||
|
||||
if 'input_names' in kwargs:
|
||||
input_names = kwargs['input_names']
|
||||
for i, input_name in enumerate(input_names):
|
||||
assert inputs_ie[i].attrib['name'] == input_name, \
|
||||
'Input order does not match framework order. Input with index {} is {}, ' \
|
||||
'but expected {}'.format(i, inputs_ie[i].attrib['name'], input_name)
|
||||
|
||||
if 'output_names' in kwargs:
|
||||
output_names = kwargs['output_names']
|
||||
for i, output_name in enumerate(output_names):
|
||||
output_name_ie = outputs_ie[i].attrib['name']
|
||||
output_without_sink_port = re.sub(r'\/sink_port_.', '', output_name_ie)
|
||||
|
||||
assert output_without_sink_port == output_name, \
|
||||
'Output order does not match framework order. Output with index {} is {}, ' \
|
||||
'but expected {}'.format(i, output_without_sink_port, output_name)
|
||||
|
||||
|
||||
# Feed dict for each input is filled with random number.
|
||||
# It is possible to redefine this function and generate your own input
|
||||
def _prepare_input(self, inputs_dict):
|
||||
@@ -124,10 +150,13 @@ class CommonLayerTest:
|
||||
is_ok = True
|
||||
from common.utils.common_utils import allclose
|
||||
for framework_out_name in framework_res:
|
||||
if framework_out_name not in mapping_dict:
|
||||
raise RuntimeError("Output {} not found in mapping file!".format(framework_out_name))
|
||||
|
||||
ie_out_name = mapping_dict[framework_out_name]
|
||||
if framework_out_name not in list(infer_res.keys()):
|
||||
if framework_out_name not in mapping_dict:
|
||||
raise RuntimeError("Output {} not found in mapping file!".format(framework_out_name))
|
||||
ie_out_name = mapping_dict[framework_out_name]
|
||||
else:
|
||||
ie_out_name = framework_out_name
|
||||
|
||||
if not allclose(infer_res[ie_out_name], framework_res[framework_out_name], atol=framework_eps,
|
||||
rtol=framework_eps):
|
||||
|
||||
@@ -124,6 +124,48 @@ class TestConcat(Caffe2OnnxLayerTest):
|
||||
|
||||
return onnx_net, ref_net
|
||||
|
||||
def create_concat_net(self, input_shape, output_shape, axis, input_names, ir_version):
    """Build an ONNX Concat model whose inputs keep the caller-supplied name order.

    ONNX net                              IR net

    Input1----->Concat------>Output  =>   Input1--->Concat------>Output
    Input2-----'                          Input2---'
    Input3-----'                          Input3---'
    ...                                   ...

    :param input_shape: shape shared by every input tensor
    :param output_shape: shape of the concatenated output tensor
    :param axis: concatenation axis
    :param input_names: input tensor names, in the order the test expects them
                        to appear in the converted IR
    :param ir_version: IR version (unused here; kept for the common test API)
    :return: tuple of (onnx ModelProto, reference net or None)
    """
    #
    # Create ONNX model
    #
    import onnx
    from onnx import helper
    from onnx import TensorProto

    # One value_info per requested input name; the order matters for this test.
    inputs_list = [helper.make_tensor_value_info(input_name, TensorProto.FLOAT, input_shape)
                   for input_name in input_names]

    output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

    node = onnx.helper.make_node('Concat', inputs=input_names, outputs=['output'], axis=axis)

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [node],
        'concat_model',
        inputs_list,
        [output],
    )

    # Create the model (ModelProto)
    onnx_net = helper.make_model(graph_def, producer_name='test_concat_model')

    # No reference IR is compared for this test.
    ref_net = None

    return onnx_net, ref_net
|
||||
|
||||
test_data_3D = [
|
||||
dict(input_shape=[1, 50, 50],
|
||||
output_shape=[2, 50, 50],
|
||||
@@ -194,6 +236,21 @@ class TestConcat(Caffe2OnnxLayerTest):
|
||||
axis=4),
|
||||
]
|
||||
|
||||
# Parameter sets for checking that Parameter order in the IR matches the
# order of inputs in the original ONNX model.
test_concat_inputs_order_params = [
    dict(input_shape=[6],
         output_shape=[30],
         axis=0,
         input_names=['a', 't', 'm', 'p', 'e']),
    dict(input_shape=[5, 2],
         output_shape=[5, 8],
         axis=1,
         input_names=['inp2', 'inp1', 'inp5', 'inp4']),
    dict(input_shape=[6, 2, 5, 3],
         output_shape=[6, 2, 20, 3],
         axis=2,
         input_names=['n', 's', 'c', 'x']),
]
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_3D)
|
||||
@pytest.mark.nightly
|
||||
def test_concat_3D_const(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
@@ -223,3 +280,9 @@ class TestConcat(Caffe2OnnxLayerTest):
|
||||
def test_concat_5D_const(self, params, ie_device, precision, ir_version, temp_dir):
    # Build the constant-input Concat network, then run the shared test harness.
    nets = self.create_concat_net_const(**params, ir_version=ir_version)
    self._test(*nets, ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
|
||||
@pytest.mark.parametrize("params", test_concat_inputs_order_params)
@pytest.mark.nightly
def test_concat_inputs_order(self, params, ie_device, precision, ir_version, temp_dir):
    # Passing input_names lets the harness verify Parameter order in the IR.
    nets = self.create_concat_net(**params, ir_version=ir_version)
    self._test(*nets, ie_device=ie_device, precision=precision, ir_version=ir_version,
               temp_dir=temp_dir, input_names=params['input_names'])
|
||||
|
||||
@@ -30,6 +30,68 @@ test_data_5D = [
|
||||
[1, 50, 10, 80, 60]], axis=2),
|
||||
dict(input_shape=[1, 50, 50, 80, 60], output_shapes=[[1, 25, 50, 80, 60], [1, 25, 50, 80, 60]], axis=1)]
|
||||
|
||||
# Parameter sets for checking that Result order in the IR matches the
# order of outputs in the original ONNX model.
test_multiple_out = [
    dict(input_shape=[3, 10, 10],
         output_shapes=[[1, 10, 10], [1, 10, 10], [1, 10, 10]],
         axis=0,
         output_names=['h', 'b', 'l']),
    dict(input_shape=[1, 50, 50, 80, 60],
         output_shapes=[[1, 50, 10, 80, 60], [1, 50, 10, 80, 60], [1, 50, 10, 80, 60],
                        [1, 50, 10, 80, 60], [1, 50, 10, 80, 60]],
         axis=2,
         output_names=['k', 'p', 'a', 'r', 's']),
    # NOTE(review): five output shapes but only four output names here, so the
    # last shape is never consumed by create_split_net_ordered_outputs -- confirm
    # whether a fifth name was intended.
    dict(input_shape=[1, 4, 3],
         output_shapes=[[1, 1, 3], [1, 1, 3], [1, 1, 3], [1, 1, 3], [1, 1, 3]],
         axis=1,
         output_names=['inp4', 'inp1', 'inp3', 'inp2']),
]
|
||||
|
||||
# Parameter sets for the Split-plus-Add case. The final two entries of each
# output_names list name the Add results; the rest name the Split outputs.
test_multiple_out_with_add = [
    dict(input_shape=[3, 10, 10],
         output_shapes=[[1, 10, 10], [1, 10, 10], [1, 10, 10]],
         axis=0,
         output_names=['h', 'b', 'l', 'c', 'p']),
    dict(input_shape=[1, 50, 50, 80, 60],
         output_shapes=[[1, 50, 10, 80, 60], [1, 50, 10, 80, 60], [1, 50, 10, 80, 60],
                        [1, 50, 10, 80, 60], [1, 50, 10, 80, 60]],
         axis=2,
         output_names=['k', 'p', 'a', 'r', 's', 'l', 'w']),
    dict(input_shape=[1, 4, 3],
         output_shapes=[[1, 1, 3], [1, 1, 3], [1, 1, 3], [1, 1, 3], [1, 1, 3]],
         axis=1,
         output_names=['inp4', 'inp1', 'inp5', 'inp2', 'inp3', 'inp33']),
]
|
||||
|
||||
# Parameter set for the multiple-tensor-names-before-Result case: a Split
# output is routed through a chain of Identity nodes, and the final Identity
# result ('i3') is also a graph output.
test_multiple_out_with_identity = [
    dict(input_shape=[3, 10, 10],
         output_shapes=[[1, 10, 10], [1, 10, 10], [1, 10, 10]],
         axis=0,
         split_out_names=['h', 'b', 'l'],
         identity_names=['i1', 'i2', 'i3'],
         output_names=['h', 'b', 'l', 'i3']),
]
|
||||
|
||||
class TestSplitConcat(Caffe2OnnxLayerTest):
|
||||
# TODO Add test with default values (axis=0)
|
||||
@@ -288,6 +350,169 @@ class TestSplit(Caffe2OnnxLayerTest):
|
||||
|
||||
return onnx_net, ref_net
|
||||
|
||||
|
||||
def create_split_net_ordered_outputs(self, input_shape, output_shapes, axis, output_names, ir_version):
    """Build an ONNX Split model whose outputs keep the caller-supplied name order.

    ONNX net                          IR net

    Input->Split->Output1      =>     Input->Split->Output1
                ->Output2      =>                 ->Output2
                ->Output3      =>                 ->Output3

    :param input_shape: shape of the single input tensor
    :param output_shapes: one shape per split output, indexed like output_names
    :param axis: split axis
    :param output_names: output tensor names, in the order the test expects
                         them to appear in the converted IR
    :param ir_version: IR version (unused here; kept for the common test API)
    :return: tuple of (onnx ModelProto, reference net or None)
    """
    #
    # Create ONNX model
    #
    import onnx
    from onnx import helper
    from onnx import TensorProto

    model_input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)

    # One value_info per output name; document order is what the harness checks.
    output_list = [helper.make_tensor_value_info(name, TensorProto.FLOAT, output_shapes[idx])
                   for idx, name in enumerate(output_names)]

    split_node = onnx.helper.make_node('Split', inputs=['input'], outputs=output_names, axis=axis)

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [split_node],
        'split_model',
        [model_input],
        output_list,
    )

    # Create the model (ModelProto)
    onnx_net = helper.make_model(graph_def, producer_name='test_split_model_outputs_order')

    # No reference IR is compared for this test.
    ref_net = None

    return onnx_net, ref_net
|
||||
|
||||
def create_split_net_ordered_outputs_with_add(self, input_shape, output_shapes, axis, output_names, ir_version):
    """Build an ONNX Split whose outputs feed both Results and Add nodes.

    This test checks the case when the graph has a node that is connected with a
    Result and some other operation from a single output port.

    ONNX net                                IR net

    Input                                   Input
      |                                       |
    Split                                   Split
    |   |  ...  |                           | |  ....     |
    Ouput1 Output2 OutputN                  | |        Result_N
      \\    /                               /\\   /  \\
       Add                                 /  Add    \\
                                    Result_0  |  Result_1
                                          Result_N+1

    :param input_shape: shape of the single input tensor
    :param output_shapes: one shape per Split output
    :param axis: split axis
    :param output_names: Split output names followed by the two Add output names
    :param ir_version: IR version (unused here; kept for the common test API)
    :return: tuple of (onnx ModelProto, reference net or None)
    """
    #
    # Create ONNX model
    #
    import onnx
    from onnx import helper
    from onnx import TensorProto

    model_input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)

    # The last two names belong to the Add nodes; the rest are Split outputs.
    add_output_name1 = output_names[-2]
    add_output_name2 = output_names[-1]
    outputs_without_add = output_names[:-2]

    output_list = [helper.make_tensor_value_info(name, TensorProto.FLOAT, output_shapes[i])
                   for i, name in enumerate(outputs_without_add)]

    node = onnx.helper.make_node('Split', inputs=['input'], outputs=outputs_without_add, axis=axis)
    # Second split output feeds both a Result and this Add (the tested case).
    node_add1 = helper.make_node(
        'Add',
        inputs=[outputs_without_add[1], outputs_without_add[2]],
        outputs=[add_output_name1]
    )
    node_add2 = helper.make_node(
        'Add',
        inputs=[add_output_name1, outputs_without_add[2]],
        outputs=[add_output_name2]
    )

    # NOTE(review): the Add results reuse output_shapes[0]; this only holds
    # because every split part has the same shape in the current test data --
    # confirm before adding data with unequal parts.
    output_list.append(helper.make_tensor_value_info(add_output_name1, TensorProto.FLOAT, output_shapes[0]))
    output_list.append(helper.make_tensor_value_info(add_output_name2, TensorProto.FLOAT, output_shapes[0]))

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [node, node_add1, node_add2],
        'split_model',
        [model_input],
        output_list,
    )

    # Create the model (ModelProto)
    onnx_net = helper.make_model(graph_def, producer_name='test_split_model_outputs_order')

    # No reference IR is compared for this test.
    ref_net = None

    return onnx_net, ref_net
|
||||
|
||||
def create_split_net_ordered_outputs_multiple_tensor_names(self, input_shape, output_shapes, axis,
                                                           split_out_names, identity_names, output_names,
                                                           ir_version):
    """Build an ONNX Split with an Identity chain, testing multiple tensor names per Result.

    This test checks the case of multiple tensor names on the connection coming
    into a Result; the Result name must equal one of the tensor names in the list.

    ONNX net

    Input->Split->Identity1->Identity2->Identity3 -> Output1
                ->Output2
                ->Output3

    IR net

    Input->Split->Result1 - this connection has tensor names from Split,
                ->Result2   Identity1, Identity2, Identity3 ops
                ->Result3

    :param input_shape: shape of the single input tensor
    :param output_shapes: one shape per Split output
    :param axis: split axis
    :param split_out_names: names of the Split outputs
    :param identity_names: names of the three chained Identity outputs
    :param output_names: expected final output name order (used by the caller)
    :param ir_version: IR version (unused here; kept for the common test API)
    :return: tuple of (onnx ModelProto, reference net or None)
    """
    #
    # Create ONNX model
    #
    import onnx
    from onnx import helper
    from onnx import TensorProto

    model_input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)

    output_list = [helper.make_tensor_value_info(name, TensorProto.FLOAT, output_shapes[i])
                   for i, name in enumerate(split_out_names)]
    # Register the last Identity result as a graph output exactly once (appending
    # it inside the loop would create duplicate graph outputs). All shapes in the
    # current test data are equal, so output_shapes[0] is representative --
    # confirm before adding data with unequal parts.
    output_list.append(helper.make_tensor_value_info(identity_names[2], TensorProto.FLOAT, output_shapes[0]))

    node = onnx.helper.make_node('Split', inputs=['input'], outputs=split_out_names, axis=axis)
    # Chain of Identities: each hop adds another tensor name to the connection.
    identity1 = onnx.helper.make_node('Identity', inputs=[split_out_names[0]], outputs=[identity_names[0]])
    identity2 = onnx.helper.make_node('Identity', inputs=[identity_names[0]], outputs=[identity_names[1]])
    identity3 = onnx.helper.make_node('Identity', inputs=[identity_names[1]], outputs=[identity_names[2]])

    # Create the graph (GraphProto)
    graph_def = helper.make_graph(
        [node, identity1, identity2, identity3],
        'split_model',
        [model_input],
        output_list,
    )

    # Create the model (ModelProto)
    onnx_net = helper.make_model(graph_def, producer_name='test_split_model_outputs_order')

    # No reference IR is compared for this test.
    ref_net = None

    return onnx_net, ref_net
|
||||
|
||||
@pytest.mark.parametrize("params", test_data_3D)
|
||||
@pytest.mark.nightly
|
||||
def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir):
|
||||
@@ -305,3 +530,28 @@ class TestSplit(Caffe2OnnxLayerTest):
|
||||
def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir):
    # Build the 5D Split network and run it through the shared comparison harness.
    nets = self.create_split_net(**params, ir_version=ir_version)
    self._test(*nets, ie_device, precision, ir_version, temp_dir=temp_dir)
|
||||
|
||||
@pytest.mark.parametrize("params", test_multiple_out)
def test_split_outputs_order(self, params, ie_device, precision, ir_version, temp_dir):
    # output_names lets the harness verify Result order in the generated IR.
    nets = self.create_split_net_ordered_outputs(**params, ir_version=ir_version)
    self._test(*nets, ie_device, precision, ir_version,
               temp_dir=temp_dir, output_names=params['output_names'])
|
||||
|
||||
@pytest.mark.parametrize("params", test_multiple_out_with_add)
def test_split_outputs_order_multiple_connection_before_result_case(self, params, ie_device,
                                                                    precision, ir_version, temp_dir):
    # Covers a node feeding both a Result and another op from one output port.
    nets = self.create_split_net_ordered_outputs_with_add(**params, ir_version=ir_version)
    self._test(*nets, ie_device, precision, ir_version,
               temp_dir=temp_dir, output_names=params['output_names'])
|
||||
|
||||
@pytest.mark.parametrize("params", test_multiple_out_with_identity)
def test_split_outputs_order_multiple_tensors_before_result_case(self, params, ie_device,
                                                                 precision, ir_version, temp_dir):
    # Covers multiple tensor names on one connection (Split + Identity chain).
    nets = self.create_split_net_ordered_outputs_multiple_tensor_names(**params, ir_version=ir_version)
    self._test(*nets, ie_device, precision, ir_version,
               temp_dir=temp_dir, output_names=params['output_names'])
|
||||
|
||||
Reference in New Issue
Block a user