Actualize operations attributes (#3613)
* Fix missed/redundant attrs for some operations * Align auto_pad attr values in spec * Update MO IR Reader extenders for appropriate operations * Align auto_pad attr values for appropriate operations * Remove changes in extenders * Update backend_attrs for some operations * Changes in shape_infer functions to work correctly with explicit mode * Apply offline comments
This commit is contained in:
parent
29f1c38ba0
commit
a788c02c3d
@ -75,7 +75,7 @@ else:
|
||||
* *auto_pad*
|
||||
|
||||
* **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor.
|
||||
* None (not specified): use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *same_upper (same_lower)*: the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
|
||||
* *valid* - do not use padding.
|
||||
* **Type**: string
|
||||
|
@ -70,7 +70,7 @@ n_{out} = \left ( \frac{n_{in} + 2p - k}{s} \right ) + 1
|
||||
* *auto_pad*
|
||||
|
||||
* **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
|
||||
* None (not specified): use explicit padding values.
|
||||
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *same_upper (same_lower)*: the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
|
||||
* *valid* - do not use padding.
|
||||
* **Type**: string
|
||||
|
@ -45,7 +45,7 @@
|
||||
* *auto_pad*
|
||||
|
||||
* **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
|
||||
* None (not specified): use explicit padding values.
|
||||
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *same_upper (same_lower)*: the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
|
||||
* *valid* - do not use padding.
|
||||
* **Type**: string
|
||||
|
@ -77,7 +77,7 @@ else:
|
||||
* *auto_pad*
|
||||
|
||||
* **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor.
|
||||
* None (not specified): use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *same_upper (same_lower)*: the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
|
||||
* *valid* - do not use padding.
|
||||
* **Type**: string
|
||||
|
@ -47,7 +47,7 @@
|
||||
* *auto_pad*
|
||||
|
||||
* **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
|
||||
* None (not specified): use explicit padding values.
|
||||
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *same_upper (same_lower)*: the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
|
||||
* *valid* - do not use padding.
|
||||
* **Type**: string
|
||||
|
@ -64,7 +64,7 @@
|
||||
* *auto_pad*
|
||||
|
||||
* **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
|
||||
* None (not specified): use explicit padding values.
|
||||
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *same_upper (same_lower)*: the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
|
||||
* *valid* - do not use padding.
|
||||
* **Type**: string
|
||||
|
@ -57,7 +57,7 @@
|
||||
* *auto_pad*
|
||||
|
||||
* **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
|
||||
* *explicit*: use explicit padding values.
|
||||
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
|
||||
* *same_upper (same_lower)*: the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
|
||||
* *valid* - do not use padding.
|
||||
* **Type**: string
|
||||
|
@ -63,7 +63,8 @@ class Elementwise(Op):
|
||||
'in_ports_count': 2,
|
||||
'out_ports_count': 1,
|
||||
'is_eltwise': True,
|
||||
'stop_value_propagation': False
|
||||
'stop_value_propagation': False,
|
||||
'auto_broadcast': 'numpy'
|
||||
}, attrs)
|
||||
|
||||
@staticmethod
|
||||
@ -71,6 +72,9 @@ class Elementwise(Op):
|
||||
override_data_type_of_constant(node)
|
||||
node.out_port(0).set_data_type(node.in_port(0).get_data_type())
|
||||
|
||||
def backend_attrs(self):
|
||||
return ['auto_broadcast']
|
||||
|
||||
|
||||
class UnaryElementwise(Elementwise):
|
||||
def __init__(self, graph: Graph, attrs: dict):
|
||||
@ -82,6 +86,9 @@ class UnaryElementwise(Elementwise):
|
||||
def type_infer(node):
|
||||
copy_type_infer(node)
|
||||
|
||||
def backend_attrs(self):
|
||||
return []
|
||||
|
||||
|
||||
class Add(Elementwise):
|
||||
op = 'Add'
|
||||
|
@ -49,6 +49,7 @@ class FakeQuantize(Op):
|
||||
'infer': self.infer,
|
||||
'in_ports_count': 5,
|
||||
'out_ports_count': 1,
|
||||
'auto_broadcast': 'numpy'
|
||||
}
|
||||
super().__init__(graph, mandatory_props, attrs)
|
||||
if self.attrs['levels'] is None:
|
||||
@ -57,6 +58,7 @@ class FakeQuantize(Op):
|
||||
def supported_attrs(self):
|
||||
return [
|
||||
'levels',
|
||||
'auto_broadcast'
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
|
@ -33,9 +33,13 @@ class Select(Op):
|
||||
'out_ports_count': 1,
|
||||
'infer': __class__.infer,
|
||||
'type_infer': __class__.type_infer,
|
||||
'auto_broadcast': 'numpy'
|
||||
}
|
||||
super().__init__(graph, mandatory_props, attrs)
|
||||
|
||||
def backend_attrs(self):
|
||||
return ['auto_broadcast']
|
||||
|
||||
@staticmethod
|
||||
def infer(node: Node):
|
||||
assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 3, "Select operation must have 3 inputs:" \
|
||||
|
@ -48,18 +48,21 @@ class Convolution(Op):
|
||||
if not node.has_valid('pad'):
|
||||
return None
|
||||
pad = get_backend_pad(node.pad, node.spatial_dims, 0 if pad_type == 'begin' else 1)
|
||||
if node.has_valid('auto_pad'):
|
||||
if node.has_valid('auto_pad') and node.auto_pad != 'explicit':
|
||||
pad = [0 for _ in pad]
|
||||
return ','.join(map(str, pad))
|
||||
|
||||
return [
|
||||
'auto_pad',
|
||||
('auto_pad', lambda node: node.auto_pad if node.has_valid('auto_pad') else 'explicit'),
|
||||
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
|
||||
('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))),
|
||||
('pads_begin', lambda node: pad_attribute_helper(node, 'begin')),
|
||||
('pads_end', lambda node: pad_attribute_helper(node, 'end')),
|
||||
|
||||
# for Backpropdata operations only - according to spec
|
||||
('output_padding', lambda node: ','.join(map(str, node.output_padding[node.spatial_dims])) \
|
||||
if node.has_valid('output_padding') else None),
|
||||
if node.has_valid('output_padding') and node.type in
|
||||
('GroupConvolutionBackpropData', 'ConvolutionBackpropData') else None),
|
||||
|
||||
# for BinaryConvolution only
|
||||
'pad_value',
|
||||
@ -187,7 +190,7 @@ class Convolution(Op):
|
||||
# TensorFlow always has auto_pad attribute that can be either valid or same_upper
|
||||
# In ONNX auto_pad attribute is deprecated but appears in some models (could be valid, same_upper or same_lower)
|
||||
# Caffe do not use auto_pad attribute
|
||||
if node.has_valid('auto_pad') and not node.has_valid('output_spatial_shape'):
|
||||
if node.has_valid('auto_pad') and node.auto_pad != 'explicit' and not node.has_valid('output_spatial_shape'):
|
||||
node['pad_spatial_shape'], node['output_spatial_shape'] = tf_window_op_pad_infer(input_spatial_shape,
|
||||
kernel_extent,
|
||||
stride_spatial_shape,
|
||||
|
@ -48,7 +48,7 @@ class Pooling(Op):
|
||||
('exclude-pad', 'exclude_pad'),
|
||||
|
||||
'rounding_type',
|
||||
'auto_pad',
|
||||
('auto_pad', lambda node: node.auto_pad if node.has_valid('auto_pad') else 'explicit'),
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
@ -80,7 +80,7 @@ class Pooling(Op):
|
||||
stride_spatial = node.stride[node.spatial_dims]
|
||||
assert any(stride_spatial), 'Stride can not be zero in node {}'.format(node.id)
|
||||
|
||||
if node.has_valid('auto_pad'):
|
||||
if node.has_valid('auto_pad') and node.auto_pad != 'explicit':
|
||||
node.pad_spatial_shape, node.output_spatial_shape = tf_window_op_pad_infer(input_spatial_shape,
|
||||
window_spatial_shape,
|
||||
stride_spatial, node.auto_pad)
|
||||
|
Loading…
Reference in New Issue
Block a user