Support unregistered operations in MO IR Reader (#6837)

* Add support for unregistered operations in MO IR Reader

* Remove commented lines

* Add shapes equality check

* Update comments

* Update groupconv_to_conv function to support case with multiple destinations

* Add ir_data_attrs attribute to restored layers

* Update copy_shape_infer function to new graph api

* Add IE attribute to unsupported operations to save their attributes

* Fix wrong attribute name

* Update commentary

* Partially revert updating to new Graph API to fix regression, add appropriate comments

* Update code comments

* Rename copy_shape_infer function and add more comments
This commit is contained in:
Anton Chetverikov
2021-08-18 12:56:13 +03:00
committed by GitHub
parent 96732e3f0e
commit 6369f43c99
7 changed files with 61 additions and 26 deletions

View File

@@ -66,7 +66,7 @@ def compare_graphs(graph: Graph, graph_ref: Graph, last_node: str, last_node_ref
ref_node_type = node_ref.type if node_ref.has_valid("type") else None
for attr in graph_ref.node[node_ref.id]:
if graph_ref.node[node_ref.id][attr] is None or attr in ['name', 'id', '_in_ports', '_out_ports',
'infer', 'IE', 'biases', 'weights', 'custom', 'offset']:
'infer', 'IE', 'biases', 'weights', 'custom', 'offset', 'ir_data_attrs']:
continue
if attr not in graph.node[node.id]:
stderr.append('Current node "{}" with type {} has missing attribute {}'.format(node.id, cur_node_type, attr))

View File

@@ -208,6 +208,7 @@ class IREngine(object):
for attr in layer:
if attr.tag == 'data':
new_attrs = self.__normalize_attrs(attr.attrib)
new_attrs['ir_data_attrs'] = attr.attrib
if layer.attrib['type'] == 'Const':
assert 'offset' in new_attrs and 'size' in new_attrs, \
'Incorrect attributes for Const layer, {} instead of {}!'.format(new_attrs.keys(), ['offset', 'size'])

View File

@@ -2,6 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
import logging as log
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.utils import class_registration
@@ -35,8 +36,29 @@ class Extender(object):
node[attribute] = [node[attribute]]
@staticmethod
def const_shape_infer(node: Node):
i = len(node.in_nodes())
for num in node.out_nodes():
node.out_node(num).shape = int64_array(node.ports[i][0])
def use_shapes_from_ir(node: Node):
# This function used instead of operation shape inference function to set all output shapes the same as
# restored from IR. Firstly, check equality of old (restored from IR) and
# new (calculated while shape inference) input shapes
node['new_input_shapes'] = list()
for n in node.in_ports():
if not node.in_port(n).disconnected(): # We use such condition to handle optional inputs
node.new_input_shapes.append(node.in_port(n).data.get_shape())
assert len(node.new_input_shapes) == len(node.old_input_shapes), \
'Something wrong happened while {} node with type {} copy shape inference!'.format(node.name, node.type)
for new_input_shape, old_input_shape in zip(node.new_input_shapes, node.old_input_shapes):
assert np.array_equal(new_input_shape, old_input_shape), \
'Something wrong happened while {} node with type {} copy shape inference!'.format(node.name, node.type)
# We need to use number of connected input ports to avoid errors with numbering
# in node.ports dictionary, where used numbers of input nodes
connected_input_ports = []
for n in node.in_ports():
if not node.in_port(n).disconnected():
connected_input_ports.append(node.in_port(n))
i = len(connected_input_ports)
# Set all output shapes the same as restored from IR
for num in node.out_ports():
node.out_port(num).data.set_shape(int64_array(node.ports[i][0]))
i += 1

View File

@@ -12,4 +12,4 @@ class LSTMCell_extender(Extender):
def extend(op: Node):
if not op.has_valid('activations'):
op['activations'] = None
op['infer'] = Extender.const_shape_infer
op['infer'] = Extender.use_shapes_from_ir

View File

@@ -50,13 +50,4 @@ def common_backpropdata_extender(op: Node):
def backpropdata_infer(op: Node):
op['new_input_shapes'] = list()
for n in op.in_nodes():
op.new_input_shapes.append(op.in_node(n).shape)
assert len(op.new_input_shapes) == len(op.old_input_shapes)
for i in range(len(op.new_input_shapes)):
assert np.array_equal(op.new_input_shapes[i], op.old_input_shapes[i]), 'Something wrong happened while ' \
'{} shape infer with type {}!'.format(op.name, op.type)
Extender.const_shape_infer(op)
Extender.use_shapes_from_ir(op)

View File

@@ -40,4 +40,4 @@ class TensorIterator_extender(Extender):
del(edge['from-layer'])
del(edge['to-layer'])
op['infer'] = Extender.const_shape_infer
op['infer'] = Extender.use_shapes_from_ir

View File

@@ -185,15 +185,17 @@ def groupconv_to_conv(op: Node):
if weights_node.type == 'Const':
weights_node.value = np.reshape(weights_node.value, new_shape)
elif weights_node.type == 'Reshape':
# we remove reshape node added in ConvolutionWithGroupsResolver pass
# We remove reshape node added in ConvolutionWithGroupsResolver pass
assert weights_node.in_port(0).get_source().data.get_shape() == new_shape, \
'Weight shape and calculated shape mismatch in GroupConv node {}.'.format(op.name)
op.in_port(1).disconnect()
weights_node.in_port(0).get_source().get_connection().set_destination(op.in_port(1))
# We use add_destination method here to support case with multiple destinations of source port
weights_node.in_port(0).get_source().get_connection().add_destination(op.in_port(1))
weights_node.in_port(0).disconnect()
else:
assert op.in_port(1).get_source().data.get_shape() == new_shape, \
'Weight shape and calculated shape mismatch in GroupConv node {}.'.format(op.name)
# we need to set this attrs for correct shape infer as convolution
# We need to set this attrs for correct shape infer as convolution
op['group'] = group
# The only way GroupConvolution with 'group' = 1 appears in IR is by converting from TF DepthwiseConv2dNative.
# In this case we need to specify 'op' parameter for the
@@ -218,9 +220,6 @@ def backprop_to_deconv(op: Node):
if op.has_valid('output_padding'):
# In this case we need to create Deconvolution as Convolution
op['type_to_create'] = 'Convolution'
op['old_input_shapes'] = list()
for n in op.in_nodes():
op.old_input_shapes.append(int64_array(op.in_node(n).shape))
def ti_add_edge_attrs(op: Node):
@@ -344,6 +343,11 @@ def copy_graph_with_ops(graph: Graph) -> Graph:
# Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.)
for op in graph.get_op_nodes():
# Save input shapes restored from IR
op['old_input_shapes'] = list()
for n in op.in_nodes():
op.old_input_shapes.append(int64_array(op.in_node(n).shape))
# Apply extenders to nodes in source graph
if op.type in Extender.registered_ops:
Extender.get_extender_class_by_name(op.type).extend(op)
@@ -356,9 +360,26 @@ def copy_graph_with_ops(graph: Graph) -> Graph:
if op_type in custom_ops:
node = custom_ops[op_type](new_graph, op.attrs()).create_node()
else:
assert op_type in Op.registered_ops, 'Operation {} not found in MO operations, ' \
'please check it!'.format(op_type)
node = Op.get_op_class_by_name(op_type)(new_graph, op.attrs()).create_node()
if op_type not in Op.registered_ops:
log.warning('Operation {} is not found in MO operations, please check it! '
'Simple shape infer function is used'.format(op_type))
node = Op(new_graph, op.attrs()).create_node()
node['infer'] = Extender.use_shapes_from_ir
if 'ir_data_attrs' in op:
node['IE'] = [('layer',
[('id', lambda node: node.node), 'name', 'type', 'version'],
[('data',
list(op.ir_data_attrs.keys()),
[]),
'@ports',
'@consts'])]
else:
node = Op.get_op_class_by_name(op_type)(new_graph, op.attrs()).create_node()
# This attribute is no longer needed and we can delete it
if 'ir_data_attrs' in node:
del node['ir_data_attrs']
if op.has_and_set('need_copy_input_blobs'):
copy_input_blobs(op, node)