Clean up MO from IR v7 and other legacy code (#1521)

* Remove unnecessary ir_version checks in the MO

* Cleaned up 'backend_attrs_v2' function

* Small clean up of the 'TFCustomSubgraphCall' code

* Clean up the MO extractor attributes mapping

* Renamed PreluOp to PReLU
Evgeny Lazarev 2020-07-29 17:43:12 +03:00 committed by GitHub
parent aef6016298
commit dec7df17ed
27 changed files with 125 additions and 656 deletions

View File

@@ -16,7 +16,7 @@
import numpy as np
from extensions.back.ForceStrictPrecision import ForceStrictPrecision
from extensions.ops.prelu import PreluOp
from extensions.ops.prelu import PReLU
from mo.back.replacement import BackReplacementPattern
from mo.graph.graph import Graph, rename_node
from mo.ops.const import Const
@@ -45,7 +45,7 @@ class LeakyReLUMutation(BackReplacementPattern):
rename_node(relu, relu_name + '/to_delete')
# Create PReLU op and reconnect input/output from LeakyReLU to PReLU
prelu = PreluOp(graph, dict(name=relu_name)).create_node()
prelu = PReLU(graph, dict(name=relu_name)).create_node()
rename_node(prelu, relu_name)
const = Const(graph, dict(name=relu_name + "/weights", value=np.array([relu.negative_slope]))).create_node()

View File

@@ -23,7 +23,7 @@ from mo.ops.reshape import Reshape
class OneHotDepthNormalizer(FrontReplacementPattern):
"""
Transformation performs squeezeng one-element tensors on 1st input in OneHot into 0D scalars. This transformation
Transformation performs squeezing one-element tensors on 1st input in OneHot into 0D scalars. This transformation
allows to avoid problems with some models produced by tf2onnx which have 1D depth in OneHot.
"""
enabled = True
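
Note: not MO code, but a minimal numpy sketch of the squeeze this transformation performs (the real pass inserts a Reshape node on the OneHot depth input):

```python
import numpy as np

depth_1d = np.array([5])         # one-element 1D depth, as some tf2onnx models emit it
depth_0d = depth_1d.reshape([])  # squeezed to the 0D scalar that OneHot expects
assert depth_0d.ndim == 0 and depth_0d.item() == 5
```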

View File

@@ -13,7 +13,7 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.prelu import PreluOp
from extensions.ops.prelu import PReLU
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.caffe.extractors.utils import weights_biases
from mo.front.common.extractors.utils import layout_attrs
@@ -57,5 +57,5 @@ class PreluFrontExtractor(FrontExtractorOp):
mapping_rule.update(layout_attrs())
# update the attributes of the node
PreluOp.update_node_stat(node, mapping_rule)
PReLU.update_node_stat(node, mapping_rule)
return cls.enabled

View File

@@ -18,7 +18,7 @@ import unittest
from unittest.mock import patch
from extensions.front.caffe.prelu_ext import PreluFrontExtractor
from extensions.ops.prelu import PreluOp
from extensions.ops.prelu import PReLU
from mo.ops.op import Op
from mo.utils.unittest.extractors import FakeMultiParam
from mo.utils.unittest.graph import FakeNode
@@ -32,7 +32,7 @@ class FakePReLUProtoLayer:
class TestPreluExt(unittest.TestCase):
@classmethod
def setUpClass(cls):
Op.registered_ops['PReLU'] = PreluOp
Op.registered_ops['PReLU'] = PReLU
def test_prelu_no_pb_no_ml(self):
self.assertRaises(AttributeError, PreluFrontExtractor.extract, None)
@@ -56,7 +56,7 @@ class TestPreluExt(unittest.TestCase):
'type': 'PReLU',
'op': 'PReLU',
'channel_shared': 0,
'infer': PreluOp.infer,
'infer': PReLU.infer,
}
for key in exp_res.keys():

View File

@@ -15,7 +15,7 @@
"""
from extensions.ops.activation_ops import Elu, LeakyReLU, ReLU
from extensions.ops.prelu import PreluOp
from extensions.ops.prelu import PReLU
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.utils.error import Error
@@ -40,7 +40,7 @@ class LeakyReLUFrontExtractor(FrontExtractorOp):
'std': 0,
'sparse': -1,
'variance_norm': "caffe.FillerParameter.FAN_IN"}
PreluOp.update_node_stat(node, prelu_attrs)
PReLU.update_node_stat(node, prelu_attrs)
elif act_type == 'elu':
alpha = attrs.float('slope', 0.25)
Elu.update_node_stat(node, {'alpha': alpha})

View File

@@ -23,12 +23,6 @@ from mo.ops.result import Result
class BasicLSTMCell(FrontReplacementSubgraph):
enabled = True
# When the deprecated IR version was requested, we configure only those phases that can lead
# to functional regressions in the version 2. BasicLSTMCell is one such transformation;
# when it is turned off, the body of TF basic_lstm_cell is converted as-is in a decomposed form,
# and should work in version 2.
graph_condition = [lambda graph: graph.graph['ir_version'] != 2]
# list of names of all original nodes that are supported by IE
# this list is collected gradually by a separate transformation
# original name in this case is a selected node in the pattern

View File

@@ -17,13 +17,13 @@
import logging as log
from extensions.front.PowerToEltwises import PowerToEltwises
from extensions.ops.prelu import PreluOp
from extensions.ops.prelu import PReLU
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.graph.graph import Graph
from mo.middle.pattern_match import check_node_usages_out_of_match
class PReLU(FrontReplacementSubgraph):
class PReLUPatternFuse(FrontReplacementSubgraph):
enabled = True
def run_before(self):
@@ -57,12 +57,12 @@ class PReLU(FrontReplacementSubgraph):
''.format(', '.join([match[n].id for n in consumers])))
return
gamma = match['mul'].in_node(0) if match['mul'].in_node(1).id == match['neg_1'].id else match['mul'].in_node(1)
prelu_node = PreluOp(graph, {'name': '{}/PReLU'.format(match['add'].id)}).create_node([match['op'], gamma])
prelu_node = PReLU(graph, {'name': '{}/PReLU'.format(match['add'].id)}).create_node([match['op'], gamma])
match['add'].replace_node(prelu_node)
log.debug('PReLU pattern starting from "{}" was collapsed to "{}"'.format(match['op'].id, prelu_node.id))
class PReLUWithAbs(FrontReplacementSubgraph):
class PReLUWithAbsPatternFuse(FrontReplacementSubgraph):
enabled = True
def pattern(self):
@@ -96,6 +96,6 @@ class PReLUWithAbs(FrontReplacementSubgraph):
'replace '.format(', '.join([match[n].id for n in consumers])))
return
gamma = match['mul'].in_node(0) if match['mul'].in_node(1).id == match['sub'].id else match['mul'].in_node(1)
prelu_node = PreluOp(graph, {'name': '{}/PReLU'.format(match['add'].id)}).create_node([match['op'], gamma])
prelu_node = PReLU(graph, {'name': '{}/PReLU'.format(match['add'].id)}).create_node([match['op'], gamma])
match['add'].replace_node(prelu_node)
log.debug('PReLUWithAbs pattern starting from "{}" was collapsed to "{}"'.format(match['op'].id, prelu_node.id))
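
Note: a quick algebra check of why both fusions are valid. This is a standalone sketch and assumes the matched subgraphs compute ReLU(x) + gamma * (-ReLU(-x)) and ReLU(x) + gamma * (x - |x|) / 2 respectively, per the patterns above:

```python
import numpy as np

x = np.linspace(-3.0, 3.0, 13)
gamma = 0.25
prelu = np.where(x > 0, x, gamma * x)

# shape matched by PReLUPatternFuse (assumed): relu(x) + gamma * (-relu(-x))
fused = np.maximum(x, 0) + gamma * (-np.maximum(-x, 0))
assert np.allclose(prelu, fused)

# shape matched by PReLUWithAbsPatternFuse (assumed): relu(x) + gamma * (x - abs(x)) / 2
fused_abs = np.maximum(x, 0) + gamma * (x - np.abs(x)) / 2
assert np.allclose(prelu, fused_abs)
```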

View File

@@ -51,10 +51,8 @@ class CustomSubgraphCall(MiddleReplacementPattern):
:param graph: graph to operate on
:return: None
"""
for node_name in graph.nodes():
node = Node(graph, node_name)
if node.kind == 'op' and node.has_valid('op') and node.op == 'TFCustomSubgraphCall':
CustomSubgraphCall.update_placeholder_shape_and_add_transpose(node)
for node in graph.get_op_nodes(op='TFCustomSubgraphCall'):
CustomSubgraphCall.update_placeholder_shape_and_add_transpose(node)
@staticmethod
def update_placeholder_shape_and_add_transpose(node: Node):
@@ -116,10 +114,8 @@ class CustomSubgraphCall(MiddleReplacementPattern):
:param graph: graph to operate on
:return: None
"""
for node_name in graph.nodes():
node = Node(graph, node_name)
if node.kind == 'op' and node.has_valid('op') and node.op == 'TFCustomSubgraphCall':
CustomSubgraphCall.add_sub_graph_call_output_tensors_transposes(node)
for node in graph.get_op_nodes(op='TFCustomSubgraphCall'):
CustomSubgraphCall.add_sub_graph_call_output_tensors_transposes(node)
@staticmethod
def make_shape_4d(shape: np.array):
@@ -263,21 +259,19 @@ class CustomSubgraphCall(MiddleReplacementPattern):
src_node.shape, dst_node.type))
CustomSubgraphCall.add_reshape_before_op_node(graph, src_node_name, dst_node_name, edge_attrs)
for node_name in list(graph.nodes()):
node = Node(graph, node_name)
if node['kind'] == 'op' and node.has_and_set('type') and node.type == 'TFCustomSubgraphCall':
for index, data_node in node.out_nodes().items():
real_dims_count = len(data_node.shape)
if real_dims_count != 4:
log.info(
"There is an data tensor of shape '{}' with real dims count '{}' which goes out of '{}' "
"node".format(data_node.shape, real_dims_count, node.name))
CustomSubgraphCall.add_reshape_after_data_node(graph, data_node.id)
for node in graph.get_op_nodes(op='TFCustomSubgraphCall'):
for index, data_node in node.out_nodes().items():
real_dims_count = len(data_node.shape)
if real_dims_count != 4:
log.info(
"There is an data tensor of shape '{}' with real dims count '{}' which goes out of '{}' "
"node".format(data_node.shape, real_dims_count, node.name))
CustomSubgraphCall.add_reshape_after_data_node(graph, data_node.id)
# need to update shape of the op so IE generates XML with 4D tensors
out_shape = CustomSubgraphCall.make_shape_4d(data_node['shape'])
# need to update shape of the op so IE generates XML with 4D tensors
out_shape = CustomSubgraphCall.make_shape_4d(data_node['shape'])
data_node['shape'] = out_shape
data_node['shape'] = out_shape
@staticmethod
def add_sub_graph_call_output_tensors_transposes(node: Node):
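
Note: the rewritten loops lean on Graph.get_op_nodes(op=...). A rough stand-in for how it filters nodes, assuming the networkx-backed node attributes used in MO (this is a sketch, not the real implementation):

```python
import networkx as nx

def get_op_nodes(graph: nx.MultiDiGraph, op: str):
    # select 'op'-kind nodes whose 'op' attribute matches the requested value
    return [n for n, attrs in graph.nodes(data=True)
            if attrs.get('kind') == 'op' and attrs.get('op') == op]

g = nx.MultiDiGraph()
g.add_node('call_1', kind='op', op='TFCustomSubgraphCall')
g.add_node('data_1', kind='data')
assert get_op_nodes(g, 'TFCustomSubgraphCall') == ['call_1']
```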

View File

@@ -28,11 +28,11 @@ class Cast(Op):
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'op': __class__.op,
'op': self.op,
'type': 'Convert',
'version': 'opset1',
'infer': __class__.infer,
'type_infer': __class__.type_infer,
'infer': self.infer,
'type_infer': self.type_infer,
'dst_type': None,
'in_ports_count': 1,
'out_ports_count': 1,
@@ -40,10 +40,7 @@
super().__init__(graph, mandatory_props, attrs)
def backend_attrs(self):
if self.ir_version == 10:
return [('destination_type', lambda node: np_data_type_to_destination_type(node.dst_type))]
else:
return [('precision', lambda node: np_data_type_to_precision(node.dst_type))]
return [('destination_type', lambda node: np_data_type_to_destination_type(node.dst_type))]
@staticmethod
def type_infer(node: Node):
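
Note: for illustration, a hypothetical minimal version of the np_data_type_to_destination_type mapping used above; the real MO table covers many more types, so treat this as an assumption-laden sketch:

```python
import numpy as np

# hypothetical stand-in: numpy dtype -> IR v10 element-type string
_NP_TO_DESTINATION_TYPE = {np.float32: 'f32', np.float16: 'f16',
                           np.int32: 'i32', np.int64: 'i64', np.uint8: 'u8'}

def np_data_type_to_destination_type(dtype) -> str:
    return _NP_TO_DESTINATION_TYPE[dtype]

assert np_data_type_to_destination_type(np.float32) == 'f32'
```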

View File

@@ -42,10 +42,7 @@ class DepthToSpaceOp(Op):
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
if self.ir_version == 10:
return ['mode', 'block_size']
else:
return []
return ['mode', 'block_size']
@staticmethod
def infer(node: Node):

View File

@@ -18,7 +18,6 @@ from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.partial_infer.utils import mark_input_bins
from mo.graph.graph import Graph, Node
from mo.ops.op import Op
from mo.utils.utils import convert_param_type
class NormalizeOp(Op):
@@ -27,12 +26,12 @@ class NormalizeOp(Op):
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'type': __class__.op,
'op': __class__.op,
'type': self.op,
'op': self.op,
'eps': None,
'in_ports_count': 2,
'out_ports_count': 1,
'infer': __class__.infer
'infer': self.infer
}, attrs)
if 'across_spatial' in self.attrs and isinstance(self.attrs['across_spatial'], str):
@@ -46,8 +45,10 @@ class NormalizeOp(Op):
def supported_attrs(self):
return ['eps', 'eps_mode',
('across_spatial', lambda node: convert_param_type(node, 'across_spatial', bool, int)),
('channel_shared', lambda node: convert_param_type(node, 'channel_shared', bool, int)),
('across_spatial',
lambda node: bool(node.across_spatial) if node.has_valid('across_spatial') else None),
('channel_shared',
lambda node: bool(node.channel_shared) if node.has_valid('channel_shared') else None),
]
@staticmethod
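
Note: the two inlined lambdas follow one pattern, sketched standalone below: serialize the flag as a bool when it is set, or return None so the serializer skips the attribute:

```python
def serialize_bool_attr(attrs: dict, name: str):
    # emit a bool only when the attribute is present and set; None means "skip"
    value = attrs.get(name)
    return bool(value) if value is not None else None

assert serialize_bool_attr({'across_spatial': 1}, 'across_spatial') is True
assert serialize_bool_attr({}, 'channel_shared') is None
```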

View File

@@ -25,13 +25,11 @@ class OneHot(Op):
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': __class__.op,
'op': __class__.op,
'type': self.op,
'op': self.op,
'version': 'opset1',
'axis': -1,
'infer': __class__.infer,
'on_value': None,
'off_value': None,
'infer': self.infer,
'out_ports_count': 1,
'in_ports_count': 4,
'data_type': None,
@@ -41,10 +39,7 @@
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
if self.ir_version < 10:
return ['axis', 'on_value', 'off_value', 'depth',]
else:
return ['axis']
return ['axis']
@staticmethod
def infer(node: Node):
@@ -52,14 +47,10 @@ class OneHot(Op):
assert indices_shape is not None
dim = indices_shape.size
if node.in_port(1).disconnected(): # IR v7 version
assert node.has_valid('depth'), 'The node "{}" must have attribute "depth"'.format(node.name)
depth = node.depth
else:
assert_msg = "OneHot `{0}` ({1} input port value) should be scalar: node: `{2}`, {0} value: `{3}`"
depth = node.in_port(1).data.get_value()
assert depth is not None and depth.ndim == 0, assert_msg.format('depth', '1', node.name, depth)
depth = depth.item(0)
assert_msg = "OneHot `{0}` ({1} input port value) should be scalar: node: `{2}`, {0} value: `{3}`"
depth = node.in_port(1).data.get_value()
assert depth is not None and depth.ndim == 0, assert_msg.format('depth', '1', node.name, depth)
depth = depth.item(0)
assert node.has_valid('axis')
axis = node['axis']
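
Note: with the IR v7 branch gone, depth always arrives on input port 1 and must be a 0D tensor. In plain numpy terms:

```python
import numpy as np

depth = np.array(4)   # value read from input port 1
assert depth is not None and depth.ndim == 0, "OneHot depth should be scalar"
depth = depth.item(0) # plain Python int for the following shape arithmetic
assert depth == 4
```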

View File

@@ -47,13 +47,10 @@ class Parameter(Op):
node.out_port(0).set_data_type(node.data_type)
def supported_attrs(self):
if self.ir_version == 10:
return [
('shape', lambda node: ','.join([str(i) for i in node.shape])),
('element_type', lambda node: np_data_type_to_destination_type(node.data_type)),
]
else:
return []
return [
('shape', lambda node: ','.join([str(i) for i in node.shape])),
('element_type', lambda node: np_data_type_to_destination_type(node.data_type)),
]
@staticmethod
def infer(node):
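
Note: both remaining attributes serialize to plain strings; for instance, the shape lambda above produces:

```python
shape = [1, 3, 224, 224]
assert ','.join(str(i) for i in shape) == '1,3,224,224'  # value of the 'shape' attribute
```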

View File

@@ -17,12 +17,11 @@
import numpy as np
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.partial_infer.utils import mark_input_bins
from mo.graph.graph import Graph
from mo.ops.op import Op
class PreluOp(Op):
class PReLU(Op):
op = 'PReLU'
enabled = True
@@ -40,12 +39,6 @@ class PreluOp(Op):
'out_ports_count': 1,
}, attrs)
def supported_attrs(self):
if self.ir_version != 10:
return ['channel_shared', 'filler_type', 'filler_value', 'min', 'max', 'mean', 'std', 'sparse', 'variance_norm']
else:
return []
@staticmethod
def infer(node):
if len(node.in_nodes()) == 2:

View File

@@ -42,10 +42,7 @@ class SpaceToDepth(Op):
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
if self.ir_version == 10:
return ['mode', 'block_size']
else:
return []
return ['mode', 'block_size']
@staticmethod
def infer(node: Node):

View File

@@ -23,21 +23,22 @@ from mo.utils.error import Error
class TensorIterator(Op):
''' Loop layer that iterates over tensors and execute embedded sub-graph.
'''
"""
Loop layer that iterates over tensors and execute embedded sub-graph.
"""
op = 'TensorIterator'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': __class__.op,
'op': __class__.op,
'type': self.op,
'op': self.op,
'version': 'opset1',
'input_port_map': [], # a list of dicts with such attrs as external_port_id, etc.
'output_port_map': [], # a list of dicts with such attrs as external_port_id, etc.
'back_edges': [], # a list of dicts with such attrs as from_layer, from_port, etc.
'body': None, # an Graph object with a body sub-graph
'sub_graphs': ['body'], # built-in attribute with all sub-graphg
'sub_graphs': ['body'], # built-in attribute with all sub-graph
'infer': self.infer,
'type_infer': self.ti_type_infer,
}
@@ -91,7 +92,7 @@ class TensorIterator(Op):
if direction == 'in':
edges = node.in_edges()
if direction == 'out':
else:
edges = node.out_edges()
suitable_edges = {}
@@ -222,8 +223,8 @@ class TensorIterator(Op):
for dst in internal_node.out_port(out_port).get_destinations():
possible_output_node = dst.node
if possible_output_node.soft_get('type') == 'Result':
assert internal_output_node is None, 'Several Result operations on the same output port of {}'.format(
internal_node)
assert internal_output_node is None, 'Several Result operations on the same output port of {}' \
''.format(internal_node)
internal_output_node = possible_output_node
assert internal_output_node is not None
TensorIterator.update_back_edge_map(ti=ti, direction='from', old_layer_id=internal_node_id,
@@ -256,12 +257,6 @@
('to-port', 'to_port'),
]
gen_port_map = lambda node, port_map: self.generate_port_map_v10(node, port_map) \
if self.ir_version == 10 else self.generate_port_map(node, port_map)
gen_back_map = lambda node: self.generate_back_edges_v10(node) \
if self.ir_version == 10 else self.generate_back_edges(node)
new_attrs.update({
'IE': [(
'layer',
@@ -270,13 +265,13 @@
('data', self.backend_attrs() + self.default_backend_attrs, []),
'@ports',
('port_map', [], [
('@list', lambda node: gen_port_map(node, node.input_port_map),
('@list', lambda node: self.generate_port_map(node, node.input_port_map),
('input', port_map_attrs, [])),
('@list', lambda node: gen_port_map(node, node.output_port_map),
('@list', lambda node: self.generate_port_map(node, node.output_port_map),
('output', port_map_attrs, [])),
]),
('back_edges', [], [
('@list', lambda node: gen_back_map(node), ('edge', back_edges_attrs, [])),
('@list', lambda node: self.generate_back_edges(node), ('edge', back_edges_attrs, [])),
]),
('body', [], [('@network', 'body')]),
])]
@@ -303,23 +298,6 @@
@staticmethod
def generate_port_map(node: Node, src_port_map):
""" Extract port_map attributes from node and node.body attributes.
It iterates over src_port_map and substitude external_port_id, internal_port_id and
internal_layer_id by real values queried from node ports and node.body attributes.
"""
result_list = []
for map_item in src_port_map:
result = dict(map_item)
assert result is not map_item
result['external_port_id'] = __class__.find_port_id(node, result['external_port_id'], 'external_port_id')
result['internal_layer_id'], result['internal_port_id'] = __class__.find_internal_layer_and_port(
node.body, result['internal_layer_id'], result['internal_port_id'])
result_list.append(result)
return result_list
@staticmethod
def generate_port_map_v10(node: Node, src_port_map):
""" Extract port_map attributes from node and node.body attributes.
It iterates over src_port_map and substitude external_port_id, internal_port_id and
@@ -336,20 +314,6 @@
@staticmethod
def generate_back_edges(node: Node):
''' Extract back_edges attributes from node and node.body attributes. '''
result_list = []
for back_edge in node.back_edges:
result = dict(back_edge)
assert result is not back_edge
result['from_layer'], result['from_port'] = __class__.find_internal_layer_and_port(
node.body, result['from_layer'], result['from_port'])
result['to_layer'], result['to_port'] = __class__.find_internal_layer_and_port(
node.body, result['to_layer'], result['to_port'])
result_list.append(result)
return result_list
@staticmethod
def generate_back_edges_v10(node: Node):
''' Extract back_edges attributes from node and node.body attributes. '''
result_list = []
for back_edge in node.back_edges:
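
Note: the surviving generators share the copy-and-resolve pattern shown in this sketch. Here find_internal_layer_and_port is a simplified stand-in for the real static method, which also takes the body graph:

```python
def resolve_back_edges(back_edges, find_internal_layer_and_port):
    result_list = []
    for back_edge in back_edges:
        result = dict(back_edge)  # copy, so node.back_edges is never mutated
        result['from_layer'], result['from_port'] = \
            find_internal_layer_and_port(result['from_layer'], result['from_port'])
        result['to_layer'], result['to_port'] = \
            find_internal_layer_and_port(result['to_layer'], result['to_port'])
        result_list.append(result)
    return result_list

edges = resolve_back_edges([{'from_layer': 'a', 'from_port': 0, 'to_layer': 'b', 'to_port': 1}],
                           lambda layer, port: (layer.upper(), port))
assert edges == [{'from_layer': 'A', 'from_port': 0, 'to_layer': 'B', 'to_port': 1}]
```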

View File

@@ -21,8 +21,7 @@ import numpy as np
from extensions.ops.topk import TopK
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, \
connect, FakeAttr
from mo.utils.unittest.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect
class TestTopKInfer(unittest.TestCase):
@@ -46,9 +45,7 @@
('topk_d2', 'output_2'),
], nodes_with_edges_only=True)
def test_topk_infer_v10_opset1(self):
self.graph.graph['cmd_params'] = FakeAttr(ir_version=10)
def test_topk_infer_opset1(self):
topk_node = Node(self.graph, 'topk')
topk_node['version'] = 'opset1'
TopK.infer(topk_node)
@@ -59,9 +56,7 @@
self.assertTrue(topk_node.out_port(0).get_data_type() == np.float32)
self.assertTrue(topk_node.out_port(1).get_data_type() == np.int32)
def test_topk_infer_v10_i64_opset3(self):
self.graph.graph['cmd_params'] = FakeAttr(ir_version=10)
def test_topk_infer_i64_opset3(self):
topk_node = Node(self.graph, 'topk')
topk_node['version'] = 'opset3'
topk_node['index_element_type'] = np.int64
@@ -73,9 +68,7 @@
self.assertTrue(topk_node.out_port(0).get_data_type() == np.float32)
self.assertTrue(topk_node.out_port(1).get_data_type() == np.int64)
def test_topk_infer_v10_i32_opset3(self):
self.graph.graph['cmd_params'] = FakeAttr(ir_version=10)
def test_topk_infer_i32_opset3(self):
topk_node = Node(self.graph, 'topk')
topk_node['version'] = 'opset3'
topk_node['index_element_type'] = np.int32

View File

@@ -150,7 +150,7 @@ def node_defs_to_str(node: Node):
def update_ie_fields(attrs: dict, ir_version = None):
ir_v4_attrs = {
ir_v10_attrs = {
'IE': [(
'layer',
[('id', lambda node: node.node), 'name', 'type', 'version'],
@@ -308,336 +308,10 @@ def update_ie_fields(attrs: dict, ir_version = None):
'@consts'])]
}
ir_v3_attrs = {
'IE': [(
'layer',
[('id', lambda node: node.node), 'name', 'precision', 'type', 'version'],
[
(
'data',
[
'auto_pad',
'epsilon',
'min',
'max',
('axis', lambda node: attr_getter(node, 'axis')),
'tiles',
('dim', lambda node: attr_getter(node, 'dim')),
'num_axes',
('pool-method', 'pool_method'),
'group',
('rounding-type', 'rounding_type'),
('exclude-pad', 'exclude_pad'),
'operation',
'out-size',
'power',
'shift',
'alpha',
'beta',
'coords',
'classes',
'num',
('local-size', 'local_size'),
'region',
'knorm',
'bias',
'num_classes',
'keep_top_k',
'variance_encoded_in_target',
'code_type',
'share_location',
'nms_threshold',
'confidence_threshold',
'background_label_id',
'top_k',
'eta',
'visualize',
'visualize_threshold',
'save_file',
'output_directory',
'output_name_prefix',
'output_format',
'label_map_file',
'name_size_file',
'num_test_image',
'prob',
'resize_mode',
'height',
'width',
'height_scale',
'width_scale',
'pad_mode',
'pad_value',
'interp_mode',
'img_size',
'img_h',
'img_w',
'step',
'step_h',
'step_w',
('offset', lambda node: attr_getter(node, 'offset')),
'variance',
'flip',
'clip',
('min_size', lambda node: attr_getter(node, 'min_size')),
('max_size', lambda node: attr_getter(node, 'max_size')),
('aspect_ratio', lambda node: attr_getter(node, 'aspect_ratio')),
'decrease_label_id',
'normalized',
'scale_all_sizes',
('type', 'norm_type'),
'eps',
'eps_mode',
'across_spatial',
'channel_shared',
'negative_slope',
'engine',
'num_filter',
('type', 'sample_type'),
('order', lambda node: attr_getter(node, 'order')),
'pooled_h',
'pooled_w',
'spatial_scale',
'cls_threshold',
'max_num_proposals',
'iou_threshold',
'min_bbox_size',
'feat_stride',
'pre_nms_topn',
'post_nms_topn',
('type', lambda node: node['filler_type'] if node.has('filler_type') else None),
('value', lambda node: node['filler_value'] if node.has('filler_value') else None),
('output',
lambda node: node.output_shape[node.channel_dims][0] if node.has('output_shape') and node.has(
'channel_dims') else None),
('input_nodes_names', lambda node: ' '.join(node['input_nodes_names']) if node.has(
'input_nodes_names') else None),
('output_tensors_names', lambda node: ' '.join(node['output_tensors_names']) if node.has(
'output_tensors_names') else None),
('real_input_dims', lambda node: ';'.join([' '.join(map(str, shape)) for shape in
node['real_input_dims']])
if node.has('real_input_dims') else None),
('protobuf', lambda node: node_defs_to_str(node) if node.has('pbs') else None),
{'custom_attributes': None},
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims])) if node.has_valid('stride') else None),
('kernel', lambda node: ','.join(map(str, node['kernel_spatial'])) if node.has_valid(
'kernel_spatial') else None),
('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims])) if node.has_valid('dilation') else None),
('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0))) if node.has_valid('pad') else None),
('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1))) if node.has_valid('pad') else None),
('scale', lambda node: attr_getter(node, 'scale')),
'crop_width',
'crop_height',
'write_augmented',
'max_multiplier',
'augment_during_test',
'recompute_mean',
'write_mean',
'mean_per_pixel',
'mode',
'bottomwidth',
'bottomheight',
'chromatic_eigvec',
'kernel_size',
'max_displacement',
'stride_1',
'stride_2',
'single_direction',
'do_abs',
'correlation_type',
'antialias',
'resample_type',
'factor',
'coeff',
('ratio', lambda node: attr_getter(node, 'ratio')),
'size',
],
[]),
'@ports',
'@consts'])]
}
ir_v2_attrs = {
'IE': [(
'layer',
[('id', lambda node: node.node), 'name', 'precision', 'type', 'version'],
[
(
'data',
[
'auto_pad',
'epsilon',
'min',
'max',
('axis', lambda node: attr_getter(node, 'axis')),
'tiles',
('dim', lambda node: attr_getter(node, 'dim')),
'num_axes',
('pool-method', 'pool_method'),
'group',
('rounding-type', 'rounding_type'),
('exclude-pad', 'exclude_pad'),
'operation',
'out-size',
'power',
'shift',
'alpha',
'beta',
'coords',
'classes',
'num',
('local-size', 'local_size'),
'region',
'knorm',
'num_classes',
'keep_top_k',
'variance_encoded_in_target',
'code_type',
'share_location',
'nms_threshold',
'confidence_threshold',
'background_label_id',
'top_k',
'eta',
'visualize',
'visualize_threshold',
'save_file',
'output_directory',
'output_name_prefix',
'output_format',
'label_map_file',
'name_size_file',
'num_test_image',
'prob',
'resize_mode',
'height',
'width',
'height_scale',
'width_scale',
'pad_mode',
'pad_value',
'interp_mode',
'img_size',
'img_h',
'img_w',
'step',
'step_h',
'step_w',
('offset', lambda node: attr_getter(node, 'offset')),
'variance',
'flip',
'clip',
('min_size', lambda node: attr_getter(node, 'min_size')),
('max_size', lambda node: attr_getter(node, 'max_size')),
('aspect_ratio', lambda node: attr_getter(node, 'aspect_ratio')),
'decrease_label_id',
'normalized',
'scale_all_sizes',
('type', 'norm_type'),
'eps',
'across_spatial',
'channel_shared',
'negative_slope',
'engine',
'num_filter',
('type', 'sample_type'),
('order', lambda node: attr_getter(node, 'order')),
'pooled_h',
'pooled_w',
'spatial_scale',
'cls_threshold',
'max_num_proposals',
'iou_threshold',
'min_bbox_size',
'feat_stride',
'pre_nms_topn',
'post_nms_topn',
('type', lambda node: node['filler_type'] if node.has('filler_type') else None),
('value', lambda node: node['filler_value'] if node.has('filler_value') else None),
('output',
lambda node: node.output_shape[node.channel_dims][0] if node.has('output_shape') and node.has(
'channel_dims') else None),
('input_nodes_names', lambda node: ' '.join(node['input_nodes_names']) if node.has(
'input_nodes_names') else None),
('output_tensors_names', lambda node: ' '.join(node['output_tensors_names']) if node.has(
'output_tensors_names') else None),
('real_input_dims', lambda node: ';'.join([' '.join(map(str, shape)) for shape in
node['real_input_dims']])
if node.has('real_input_dims') else None),
('protobuf', lambda node: node_defs_to_str(node) if node.has('pbs') else None),
{'custom_attributes': None},
spatial_getter('stride-x', 'stride', 1), # TODO check whether it is really X or Y
spatial_getter('stride-y', 'stride', 0), # TODO check whether it is really X or Y
spatial_getter('kernel-x', 'window', 1), # TODO check whether it is really X or Y
spatial_getter('kernel-y', 'window', 0), # TODO check whether it is really X or Y
('kernel-x', lambda node: kernel_getter(node, 1)), # TODO check whether it is really X or Y
('kernel-y', lambda node: kernel_getter(node, 0)), # TODO check whether it is really X or Y
spatial_getter('dilation-x', 'dilation', 1), # TODO check whether it is really X or Y
spatial_getter('dilation-y', 'dilation', 0), # TODO check whether it is really X or Y
spatial_getter('pad-x', 'pad', 1, lambda x: x[0]), # TODO check whether it is really X or Y
spatial_getter('pad-y', 'pad', 0, lambda x: x[0]), # TODO check whether it is really X or Y
spatial_getter('pad-r', 'pad', 1, lambda x: x[1]), # TODO check whether it is really X or Y
spatial_getter('pad-b', 'pad', 0, lambda x: x[1]), # TODO check whether it is really X or Y
('scale', lambda node: attr_getter(node, 'scale')),
('stride', lambda node: attr_getter(node, 'stride')),
'crop_width',
'crop_height',
'write_augmented',
'max_multiplier',
'augment_during_test',
'recompute_mean',
'write_mean',
'mean_per_pixel',
'mode',
'bottomwidth',
'bottomheight',
'chromatic_eigvec',
'kernel_size',
'max_displacement',
'stride_1',
'stride_2',
'single_direction',
'do_abs',
'correlation_type',
'antialias',
'resample_type',
'factor',
'coeff',
('ratio', lambda node: attr_getter(node, 'ratio')),
'size',
],
[]),
'@ports',
'@consts'])]
}
ir_version_mapping = {
# Default behaviour is IR V4 attributes
None: ir_v4_attrs,
10: ir_v4_attrs,
7: ir_v4_attrs,
6: ir_v3_attrs,
5: ir_v3_attrs,
4: ir_v3_attrs,
3: ir_v3_attrs,
2: ir_v2_attrs
# Default behaviour is IR V10 attributes
None: ir_v10_attrs,
10: ir_v10_attrs,
}
if ir_version not in ir_version_mapping.keys():
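
Note: after the clean-up the dispatch collapses to a single table. A minimal sketch of the remaining logic, where ir_v10_attrs stands in for the real attribute dict and ValueError stands in for MO's own Error:

```python
def select_ir_attrs(ir_version, ir_v10_attrs):
    # only IR v10 serialization attributes are left after this commit
    ir_version_mapping = {None: ir_v10_attrs, 10: ir_v10_attrs}
    if ir_version not in ir_version_mapping:
        raise ValueError('Unrecognized IR version {}'.format(ir_version))
    return ir_version_mapping[ir_version]

assert select_ir_attrs(None, {'IE': []}) == {'IE': []}
```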

View File

@@ -975,9 +975,6 @@ class Graph(nx.MultiDiGraph):
if undead_node_types is None:
undead_node_types = []
if 'fw' in self.graph and self.graph['fw'] == 'tf':
undead_node_types.append('TFCustomSubgraphCall')
if 'cmd_params' in self.graph and getattr(self.graph['cmd_params'], 'keep_shape_ops'):
undead_node_types.extend(['ShapeOf', 'Shape', 'slice_like'])

View File

@@ -39,9 +39,6 @@ nodes_attributes = {'placeholder_1': {'type': 'Parameter', 'kind': 'op', 'op': '
'data_node_4': {'value': None, 'kind': 'data'},
'data_node_5': {'value': None, 'shape': None, 'kind': 'data'},
'data_node_6': {'value': None, 'shape': None, 'kind': 'data'},
'tf_call_1': {'type': 'TFCustomSubgraphCall', 'kind': 'op', 'op': 'TFCustomSubgraphCall'},
'tf_call_2': {'type': 'TFCustomSubgraphCall', 'kind': 'op', 'op': 'TFCustomSubgraphCall'},
'tf_call_3': {'type': 'TFCustomSubgraphCall', 'kind': 'op', 'op': 'TFCustomSubgraphCall'},
'op_output': {'kind': 'op', 'op': 'Result'},
'op_output_1': {'kind': 'op', 'op': 'Result'},
'op_output_2': {'kind': 'op', 'op': 'Result'}

View File

@@ -89,7 +89,6 @@ def _fused_batch_norm_decomposition(graph: Graph, tinput: Port, toutput: Port, g
This is common function for TF, Caffe and MXNet
It creates Mul->Add->Mul->Add sub graph
"""
shape = tinput.data.get_shape()
batch_norm_name = tinput.get_connection().get_destination().node.name
# Create first Mul & Add operations

View File

@@ -51,16 +51,13 @@ class Const(Op):
self.attrs['data_type'] = data_type_str_to_np(self.attrs['force_type'])
def supported_attrs(self):
if self.ir_version == 10:
return [
'offset',
'size',
('shape', lambda node: ','.join([str(i) for i in node.shape])),
('element_type', lambda node: precision_to_destination_type(node.force_type)
if node.has_valid('force_type') else np_data_type_to_destination_type(node.value.dtype)),
]
else:
return []
return [
'offset',
'size',
('shape', lambda node: ','.join([str(i) for i in node.shape])),
('element_type', lambda node: precision_to_destination_type(node.force_type)
if node.has_valid('force_type') else np_data_type_to_destination_type(node.value.dtype)),
]
@staticmethod
def type_infer(node):

View File

@@ -32,10 +32,10 @@ class Convolution(Op):
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'type': __class__.op,
'op': __class__.op,
'type': self.op,
'op': self.op,
'version': 'opset1',
'infer': __class__.infer,
'infer': self.infer,
'multiplication_transparent': True,
'multiplication_transparent_ports': [(0, 0), (1, 0)],
'in_ports_count': 3,
@@ -43,64 +43,28 @@
}, attrs)
def backend_attrs(self):
if self.ir_version == 10:
def pad_attribute_helper(node: Node, pad_type: str='begin'):
assert pad_type in ['begin', 'end']
if not node.has_valid('pad'):
return None
pad = get_backend_pad(node.pad, node.spatial_dims, 0 if pad_type == 'begin' else 1)
if node.has_valid('auto_pad'):
pad = [0 for _ in pad]
return ','.join(map(str, pad))
def pad_attribute_helper(node: Node, pad_type: str='begin'):
assert pad_type in ['begin', 'end']
if not node.has_valid('pad'):
return None
pad = get_backend_pad(node.pad, node.spatial_dims, 0 if pad_type == 'begin' else 1)
if node.has_valid('auto_pad'):
pad = [0 for _ in pad]
return ','.join(map(str, pad))
return [
'auto_pad',
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))),
('pads_begin', lambda node: pad_attribute_helper(node, 'begin')),
('pads_end', lambda node: pad_attribute_helper(node, 'end')),
('output_padding', lambda node: ','.join(map(str, node.output_padding[node.spatial_dims])) \
if node.has_valid('output_padding') else None),
# for BinaryConvolution only
'pad_value',
'mode',
]
return [
'auto_pad',
'group',
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))),
('kernel', lambda node: ','.join(map(str, node['kernel_spatial'])) \
if node.has_valid('kernel_spatial') else None),
('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))),
('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))),
'output',
'pad_value',
'mode',
'input',
]
def backend_attrs_v2(self):
return [
spatial_getter('stride-x', 'stride', 1),
spatial_getter('stride-y', 'stride', 0),
('kernel-x', lambda node: node.kernel_spatial[1]),
('kernel-y', lambda node: node.kernel_spatial[0]),
spatial_getter('dilation-x', 'dilation', 0),
spatial_getter('dilation-y', 'dilation', 1),
spatial_getter('pad-x', 'pad', 1, lambda x: x[0]),
spatial_getter('pad-y', 'pad', 0, lambda x: x[0]),
spatial_getter('pad-r', 'pad', 1, lambda x: x[1]),
spatial_getter('pad-b', 'pad', 0, lambda x: x[1]),
'auto_pad',
'output',
'group',
]
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))),
('pads_begin', lambda node: pad_attribute_helper(node, 'begin')),
('pads_end', lambda node: pad_attribute_helper(node, 'end')),
('output_padding', lambda node: ','.join(map(str, node.output_padding[node.spatial_dims])) \
if node.has_valid('output_padding') else None),
# for BinaryConvolution only
'pad_value',
'mode',
]
@staticmethod
def calc_convolution(input_spatial_shape, stride_spatial_shape, pad_spatial_shape, kernel_extent):
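
Note: to make pad_attribute_helper concrete, here is a sketch of what get_backend_pad extracts (simplified; the real helper lives in mo.front.onnx.extractors.utils):

```python
import numpy as np

pad = np.array([[0, 0], [0, 0], [1, 1], [2, 2]])  # [begin, end] pads per axis, NCHW layout
spatial_dims = [2, 3]

def get_backend_pad(pad, spatial_dims, start_or_end):
    # pick the begin (0) or end (1) pad of every spatial axis
    return [int(pad[d][start_or_end]) for d in spatial_dims]

assert get_backend_pad(pad, spatial_dims, 0) == [1, 2]  # serialized as pads_begin
assert get_backend_pad(pad, spatial_dims, 1) == [1, 2]  # serialized as pads_end
```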

View File

@@ -16,8 +16,7 @@
import numpy as np
from mo.front.common.partial_infer.utils import mark_input_bins, assign_dims_to_weights, \
tf_window_op_pad_infer
from mo.front.common.partial_infer.utils import mark_input_bins, assign_dims_to_weights, tf_window_op_pad_infer
from mo.front.extractor import spatial_getter
from mo.front.onnx.extractors.utils import get_backend_pad
from mo.graph.graph import Node, Graph
@@ -30,58 +29,25 @@ class Deconvolution(Op):
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'type': __class__.op,
'op': __class__.op,
'type': self.op,
'op': self.op,
'version': 'opset1',
'infer': __class__.infer,
'infer': self.infer,
'in_ports_count': 3,
'out_ports_count': 1,
}, attrs)
def backend_attrs(self):
if self.ir_version == 10:
return [
('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))),
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
('pads_begin',
lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0))) if node.has_valid(
'pad') else None),
('pads_end',
lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1))) if node.has_valid(
'pad') else None),
'auto_pad',
]
return [
('dilations',
lambda node: ','.join(map(str, node['dilation'][node.spatial_dims])) if node.has_valid('dilation')
else None),
'auto_pad',
'group',
('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))),
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
('kernel', lambda node: ','.join(map(str, node['kernel_spatial']))),
('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))),
('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))),
'output'
]
def backend_attrs_v2(self):
return [
spatial_getter('stride-x', 'stride', 1),
spatial_getter('stride-y', 'stride', 0),
('kernel-x', lambda node: node.kernel_spatial[1]),
('kernel-y', lambda node: node.kernel_spatial[0]),
spatial_getter('pad-x', 'pad', 1, lambda x: x[0]),
spatial_getter('pad-y', 'pad', 0, lambda x: x[0]),
spatial_getter('pad-r', 'pad', 1, lambda x: x[1]),
spatial_getter('pad-b', 'pad', 0, lambda x: x[1]),
('pads_begin',
lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0))) if node.has_valid(
'pad') else None),
('pads_end',
lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1))) if node.has_valid(
'pad') else None),
'auto_pad',
'output',
'group',
]
@staticmethod

View File

@@ -21,8 +21,7 @@ from collections import namedtuple
import networkx as nx
import numpy as np
from mo.front.extractor import add_attrs_props
from mo.front.extractor import update_ie_fields
from mo.front.extractor import add_attrs_props, update_ie_fields
from mo.graph.graph import Node, Graph
from mo.utils import class_registration
from mo.utils.error import Error
@@ -74,12 +73,6 @@ class Op(object):
backend_attrs_mapping = {
None: self.backend_attrs,
10: self.backend_attrs,
7: self.backend_attrs,
6: self.backend_attrs,
5: self.backend_attrs,
4: self.backend_attrs,
3: self.backend_attrs,
2: self.backend_attrs_v2
}
if self.ir_version not in backend_attrs_mapping.keys():
@@ -103,7 +96,7 @@ class Op(object):
else:
node = node_port
port = 0
# 'data' nodes do not have 'out' edge attibute but always has one output
# 'data' nodes do not have 'out' edge attribute but always has one output
out_ids = [attr['out'] for _, __, attr in node.graph.out_edges(node.id, data=True) if 'out' in attr]
if len(set(out_ids)) > 1 and not isinstance(node_port, tuple):
raise Error('Node {} has more than one outputs. Provide output port explicitly. '.format(node.name))
@@ -213,10 +206,9 @@ class Op(object):
[np.array_equal(old_data_value[id], data_node.value) for id, data_node in enumerate(data_nodes)])
assert all(old_shape is None for old_shape in old_data_shape) or all(
[np.array_equal(old_data_shape[id], data_node.shape) for id, data_node in enumerate(data_nodes)]), \
"After re-inference of {} node, old and new shapes do not match. Old shapes: {}, new shapes: {}.".format(
new_op_node.soft_get('name'),
[old_data_shape[id] for id in range(len(data_nodes))],
[data_node.shape for data_node in data_nodes])
"After re-inference of {} node, old and new shapes do not match. Old shapes: {}, new shapes: {}." \
"".format(new_op_node.soft_get('name'), [old_data_shape[id] for id in range(len(data_nodes))],
[data_node.shape for data_node in data_nodes])
for data_node in data_nodes:
if log.getLogger().isEnabledFor(log.DEBUG):
log.debug(
@@ -325,9 +317,6 @@
"""
return self.supported_attrs()
def backend_attrs_v2(self):
return self.backend_attrs()
@staticmethod
def get_op_class_by_name(name: str):
return __class__.registered_ops[name]
@@ -431,13 +420,11 @@ class PermuteAttrs:
# This function creates permutation on edge between node1->node2
edge_attrs = node1.graph.get_edge_data(node1.id, node2.id)[0]
if 'permutation' not in edge_attrs or override:
nx.set_edge_attributes(G=node1.graph,
values={(node1.id, node2.id, 0): permutation},
name='permutation')
nx.set_edge_attributes(G=node1.graph, values={(node1.id, node2.id, 0): permutation}, name='permutation')
else:
# If permutation exists we check that given and already set permutations are equal
if (edge_attrs['permutation'] is None and permutation is not None) or \
not np.array_equal(edge_attrs['permutation'], permutation):
not np.array_equal(edge_attrs['permutation'], permutation):
raise Error('Permutation already exists in edge between {} and {}'.format(node1.id, node2.id))
@staticmethod
@@ -452,8 +439,8 @@
def get_nhwc_to_nchw_permutation(dims_number: int):
# This function returns permutation from NHWC to NCHW for given dims number
if dims_number != 3:
perm = [0, dims_number - 1, *[x for x in range(1, dims_number - 1)]] if dims_number > 1 else [x for x in range(
dims_number)]
perm = [0, dims_number - 1, *[x for x in range(1, dims_number - 1)]] if dims_number > 1 else \
[x for x in range(dims_number)]
else:
# Exclude 3D shapes from permutation process: identity permutation
perm = list(range(0, dims_number))
@@ -464,8 +451,7 @@ class PermuteAttrs:
def get_nchw_to_nhwc_permutation(dims_number: int):
# This function returns permutation from NCHW to NHWC for given dims number
if dims_number != 3:
perm = [0, *[x for x in range(2, dims_number)], 1] if dims_number > 1 else [x for x in range(
dims_number)]
perm = [0, *[x for x in range(2, dims_number)], 1] if dims_number > 1 else [x for x in range(dims_number)]
else:
# Exclude 3D shapes from permutation process: identity permutation
perm = list(range(0, dims_number))
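
Note: a quick worked check of the two permutation formulas for the common 4D case:

```python
dims_number = 4

# NHWC -> NCHW: move the channel axis (last) right after batch
nhwc_to_nchw = [0, dims_number - 1, *range(1, dims_number - 1)]
assert nhwc_to_nchw == [0, 3, 1, 2]

# NCHW -> NHWC: move the channel axis (position 1) to the end
nchw_to_nhwc = [0, *range(2, dims_number), 1]
assert nchw_to_nhwc == [0, 2, 3, 1]
```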

View File

@@ -53,24 +53,6 @@ class Pooling(Op):
'auto_pad',
]
def backend_attrs_v2(self):
return [
('stride', lambda node: attr_getter(node, 'stride')),
spatial_getter('stride-x', 'stride', 1),
spatial_getter('stride-y', 'stride', 0),
spatial_getter('kernel-x', 'window', 1),
spatial_getter('kernel-y', 'window', 0),
spatial_getter('pad-x', 'pad', 1, lambda x: x[0]),
spatial_getter('pad-y', 'pad', 0, lambda x: x[0]),
('pool-method', 'pool_method'),
('exclude-pad', 'exclude_pad'),
'rounding_type',
'auto_pad',
]
@staticmethod
def infer(node: Node):
assert (len(node.in_nodes()) == 1)

View File

@@ -108,14 +108,3 @@ def get_mo_root_dir():
"""
return os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), os.pardir,
os.pardir))
def convert_param_type(node, attr_name: str, type_v1x, type_v7):
if attr_name in node.attrs() and node[attr_name] is not None:
if node.graph.graph['ir_version'] < 10:
node[attr_name] = type_v7(node[attr_name])
else:
node[attr_name] = type_v1x(node[attr_name])
return node[attr_name]
else:
return None