Publishing 2020.2 content

model-optimizer/mo/ops/convolution.py
@@ -48,8 +48,8 @@ class Convolution(Op):
             'auto_pad',
             ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
             ('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))),
-            ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))),
-            ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))),
+            ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0))) if node.has_valid('pad') else None),
+            ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1))) if node.has_valid('pad') else None),
             ('output_padding', lambda node: ','.join(map(str, node.output_padding[node.spatial_dims])) \
                 if node.has_valid('output_padding') else None),
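This hunk guards the pads serialization with has_valid('pad'), so a convolution without a stored 'pad' attribute now serializes no pads instead of failing. A minimal sketch of what these lambdas produce, assuming get_backend_pad(pad, spatial_dims, idx) simply selects the begin (idx=0) or end (idx=1) pad of each spatial dimension (the values below are a stand-in, not the real Node class):

    import numpy as np

    def get_backend_pad(pad, spatial_dims, idx):
        # assumed behavior: pick the begin (idx=0) or end (idx=1) pad
        # for the spatial dimensions only
        return [pad[d][idx] for d in spatial_dims]

    # stand-in state for a 2D convolution in NCHW layout
    pad = np.array([[0, 0], [0, 0], [1, 3], [2, 4]])  # per-dimension [begin, end]
    spatial_dims = [2, 3]

    print(','.join(map(str, get_backend_pad(pad, spatial_dims, 0))))  # pads_begin -> 1,2
    print(','.join(map(str, get_backend_pad(pad, spatial_dims, 1))))  # pads_end   -> 3,4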
@@ -175,7 +175,7 @@ class Convolution(Op):

         if not node.has_valid('output'):
             # restore the number of output feature maps from the second argument that is weights
-            if node.type in ['Convolution', 'Deconvolution', 'DeformableConvolution']:
+            if node.type in ['Convolution', 'Deconvolution', 'DeformableConvolution', 'BinaryConvolution']:
                 node['output'] = kernel_shape[node.output_feature_channel]
             else:
                 raise Error(
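The added 'BinaryConvolution' branch reuses the existing recovery of the output channel count from the weights shape. As a hedged illustration, assuming an OIHW weights layout where output_feature_channel indexes the O dimension:

    import numpy as np

    kernel_shape = np.array([64, 3, 7, 7])  # assumed O, I, H, W layout
    output_feature_channel = 0              # assumed index of the O dimension
    print(kernel_shape[output_feature_channel])  # -> 64 output feature maps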
@@ -221,7 +221,7 @@ class Convolution(Op):
             node.pad = pad
         else:
             pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)
-            if node.type == 'Convolution':
+            if node.type in ('Convolution', 'BinaryConvolution'):
                 float_spatial = Convolution.calc_convolution(input_spatial_shape, stride_spatial_shape,
                                                              pad_spatial_shape,
                                                              kernel_extent)
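calc_convolution itself is outside this hunk; as a sketch, it is assumed to implement the standard output-size arithmetic (input + pads - kernel_extent) / stride + 1, with kernel_extent = dilation * (kernel - 1) + 1, which the caller then floors:

    import numpy as np

    def calc_convolution(input_spatial_shape, stride_spatial_shape, pad_spatial_shape, kernel_extent):
        # assumed standard convolution output-size arithmetic
        return (input_spatial_shape + pad_spatial_shape - kernel_extent) / stride_spatial_shape + 1

    # e.g. a 3x3 kernel, stride 2, total padding 2 per axis, on a 224x224 input
    out = calc_convolution(np.array([224, 224]), np.array([2, 2]), np.array([2, 2]), np.array([3, 3]))
    print(np.floor(out).astype(np.int64))  # -> [112 112]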
model-optimizer/mo/ops/pad.py
@@ -13,49 +13,32 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 """

 import logging as log

 import numpy as np

 from mo.graph.graph import Graph
-from mo.ops.op import Op, PermuteAttrs
+from mo.graph.perm_inputs import PermuteInputs
+from mo.ops.op import Op


 class Pad(Op):
-    """ Pad operation that explicitly extends an input tensor at edges.
-
-        This operation frequently appears in TF and rarely in ONNX models
-        followed by some windowed operation like convolution or pooling.
-        The operation extends each (not only spatial) dimension of the input
-        tensor by new elements, increasing the output shape. The filling value
-        is defined by the 'mode' and 'fill_value' attributes, but usually it is zero
-        padding.
-
-        The operation has two forms: with one or two input arguments.
-        The first argument is the input tensor to be padded. The second
-        argument is an optional set of padding values of shape Nx2, where N is
-        the number of dimensions in the input tensor:
-
-        [[pad_begin_dim1, pad_end_dim1],
-        [pad_begin_dim2, pad_end_dim2],
-        ...
-        [pad_begin_dimN, pad_end_dimN]]
-
-        where pad_begin_dim1 etc. are padding margins in elements. If the second
-        input argument is omitted, the same values are taken from the 'pads'
-        attribute in the same format.
-    """
+    """ Pad operation that explicitly extends an input tensor at borders.
+
+        The operation extends each (not only spatial) dimension of the input tensor by new elements, increasing the
+        output shape.
+        The second and third inputs are 1D tensors with the number of elements equal to the input tensor rank. These
+        inputs specify the begin and end paddings.
+        The fourth input specifies the fill value for 'constant' mode and is not used for other modes.
+    """
     op = 'Pad'
-    enabled = True
+    enabled = False

     def __init__(self, graph: Graph, attrs: dict):
         super().__init__(graph, {
-            'op': __class__.op,
-            'type': __class__.op,
+            'op': self.op,
+            'type': self.op,
             'infer': __class__.infer,
-            'in_ports_count': 2,
+            'in_ports_count': 4,
             'out_ports_count': 1,
             'mode': 'constant',
             'fill_value': float(0),
@@ -63,7 +46,6 @@ class Pad(Op):
             1: 'int64' if graph.graph['cmd_params'].generate_experimental_IR_V10 else 'int32',
             2: 'int64' if graph.graph['cmd_params'].generate_experimental_IR_V10 else 'int32',
             },
-            'pads': None
         }, attrs)

     def supported_attrs(self):
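The new contract in a nutshell: inputs 2 and 3 carry one begin/end pad per dimension, and the output shape is their element-wise sum with the input shape. A plain NumPy illustration (values mirror the unit tests below):

    import numpy as np

    input_shape = np.array([1, 3, 100, 200])
    pads_begin = np.array([0, 0, 1, 2])  # input 2: one value per dimension
    pads_end = np.array([0, 0, 3, 4])    # input 3: one value per dimension
    fill_value = 0.0                     # input 4: used only in 'constant' mode

    output_shape = input_shape + pads_begin + pads_end
    print(output_shape)  # -> [1 3 104 206]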
@@ -78,50 +60,80 @@ class Pad(Op):

     @staticmethod
     def infer(node):
-        PermuteAttrs.create_permute_attrs(node, attrs=[('pads', 'input:0')])
-
-        num_of_inputs = len(node.in_nodes())
-        if node.has_valid('pads'):
-            assert num_of_inputs == 1, "Pad operation has pads attribute and unexpected additional input " \
-                                       "argument for node {}.".format(node.name)
-        else:
-            assert num_of_inputs >= 2, "Missing required second input argument for node {} and pads attribute " \
-                                       "is missing.".format(node.name)
-            node['pads'] = node.in_node(1).value
-            if num_of_inputs in [3, 4]:
-                pads_begin = node.in_node(1).value
-                pads_end = node.in_node(2).value
-                node['pads'] = np.concatenate((pads_begin.reshape(-1, 1), pads_end.reshape(-1, 1)), 1)
-                node['fill_value'] = node.in_node(3).value if num_of_inputs == 4 else 0.0
-        padding = node.pads
-
-        input_shape = node.in_node(0).shape
-        if padding is None or input_shape is None:
-            log.error('The paddings are not defined for node "{}"'.format(node.soft_get('name')))
-            return
-
-        # paddings can be defined, partially defined or undefined
-        # TODO for now we only handle fully defined paddings
-        # That means that intermediate tensor that delivers padding
-        # should have defined value and size Nx2
-        # TODO possible broadcasts are not supported
-        assert (padding.ndim == 2 and padding.shape[1] == 2)
-
-        # make sure that input has the same number of dimensions as the number of padding dimensions
-        assert (padding.shape[0] == len(input_shape)), \
-            "Input tensor shape {} and pads values {} do not match for Pad node {}".format(
-                input_shape, padding.shape, node.name
-            )
-
-        # sum low and high padding values to calculate the shape modification vector
-        shape_change = np.add.reduce(padding, 1)
-        assert (shape_change.shape == input_shape.shape)
-
-        # preserve non-positive values in the input shape, because it has a special meaning
-        shape = np.array(
-            [shape_change[i] + input_shape[i] if input_shape[i] > 0 else input_shape[i] for i in
-             range(len(input_shape))])
-
-        assert len(node.out_nodes()) == 1
-
-        node.out_node().shape = shape
+        pad_node_name = node.soft_get('name', node.id)
+
+        assert len(node.in_nodes()) in [3, 4], "The node {} must have 3 or 4 inputs".format(pad_node_name)
+
+        input_shape = node.in_port(0).data.get_shape()
+        pad_beg = node.in_port(1).data.get_value()
+        pad_end = node.in_port(2).data.get_value()
+
+        assert pad_beg is not None, 'The padding begin value is None for node {}'.format(pad_node_name)
+        assert pad_end is not None, 'The padding end value is None for node {}'.format(pad_node_name)
+        assert input_shape is not None, 'The input shape is None for node {}'.format(pad_node_name)
+        assert len(input_shape) == len(pad_beg), \
+            'Length of begin padding "{}" does not correspond to input tensor shape "{}" for node "{}".' \
+            ''.format(pad_beg, input_shape, pad_node_name)
+        assert len(input_shape) == len(pad_end), \
+            'Length of end padding "{}" does not correspond to input tensor shape "{}" for node "{}".' \
+            ''.format(pad_end, input_shape, pad_node_name)
+
+        node.out_port(0).data.set_shape(input_shape + pad_beg + pad_end)
+
+        if node.in_port(0).data.get_value() is not None:
+            pads = np.insert(pad_end, np.arange(len(pad_end)), pad_beg)
+            pads = np.reshape(pads, (len(pad_end), 2))
+            pad_val = 0
+            if len(node.in_nodes()) == 4:
+                pad_val = node.in_port(3).data.get_value() if node.in_port(3).data is not None else 0
+            node.out_port(0).data.set_value(np.pad(node.in_port(0).data.get_value(), pads, constant_values=pad_val,
+                                                   mode='constant'))
+
+        # pad values should be permuted during the NHWC->NCHW layout change
+        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape')
+        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape')
+
+
+class AttributedPad(Op):
+    """ Pad operation that explicitly extends an input tensor at borders.
+
+        This operation uses the same semantics as Pad but with pad values specified as attributes.
+        Pad values are in the format [nDims, 2], where [:, 0] holds the begin pads and [:, 1] the end pads.
+    """
+
+    op = 'AttributedPad'
+    enabled = False
+
+    def __init__(self, graph: Graph, attrs: dict):
+        super().__init__(graph, {
+            'op': self.op,
+            'type': None,
+            'infer': None,  # the operation should be replaced before the shape inference
+            'in_ports_count': 1,
+            'out_ports_count': 1,
+            'mode': 'constant',
+            'fill_value': float(0),
+            'pads': None,
+        }, attrs)
+
+
+class TFPad(Op):
+    """ Pad operation that explicitly extends an input tensor at borders.
+
+        This operation follows the TensorFlow semantics with inputs:
+        1. Input tensor.
+        2. Pad values [nDims, 2]
+        3. Fill value (Optional)
+    """
+
+    op = 'TFPad'
+    enabled = False
+
+    def __init__(self, graph: Graph, attrs: dict):
+        super().__init__(graph, {
+            'op': self.op,
+            'type': None,
+            'infer': None,  # the operation should be replaced before the shape inference
+            'in_ports_count': 3,
+            'out_ports_count': 1,
+            'mode': 'constant',
+        }, attrs)
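The np.insert/np.reshape pair in the new infer interleaves the two 1D pad vectors into the [N, 2] layout that np.pad expects. A standalone sketch of that step:

    import numpy as np

    pad_beg = np.array([0, 0, 1, 2])
    pad_end = np.array([0, 0, 3, 4])

    # interleave begin/end values, then fold into N rows of [begin, end]
    pads = np.insert(pad_end, np.arange(len(pad_end)), pad_beg)
    pads = np.reshape(pads, (len(pad_end), 2))
    print(pads)  # -> [[0 0] [0 0] [1 3] [2 4]]

    data = np.zeros((1, 1, 2, 2), dtype=np.float32)
    print(np.pad(data, pads, mode='constant', constant_values=0).shape)  # -> (1, 1, 6, 8)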
model-optimizer/mo/ops/pad_test.py
@@ -19,21 +19,25 @@ import unittest
 import numpy as np

 from mo.graph.graph import Node
-from mo.ops.pad import Pad
+from mo.ops.pad import Pad, AttributedPad
 from mo.utils.unittest.graph import build_graph


-class TestPadONNXOp(unittest.TestCase):
+class TestPadOps(unittest.TestCase):
     node_attrs = {
         'data_in': {
             'kind': 'data',
             'shape': np.array([1, 3, 100, 200])
         },
-        # optional input for one of the two flavors of pad op
-        'data_pads': {
-            'kind': 'data',
-            'value': np.array([[0, 0], [0, 0], [1, 3], [2, 4]], dtype=np.int64),
-            'shape': np.array([2, 4], dtype=np.int64)
+        'pads_begin': {
+            'kind': 'data',
+            'value': np.array([0, 0, 1, 2], dtype=np.int64),
+            'shape': np.array([4], dtype=np.int64)
+        },
+        'pads_end': {
+            'kind': 'data',
+            'value': np.array([0, 0, 3, 4], dtype=np.int64),
+            'shape': np.array([4], dtype=np.int64)
         },
         'pad': {
             'op': 'Pad',
@@ -43,6 +47,7 @@ class TestPadONNXOp(unittest.TestCase):
         'data_out': {
             'kind': 'data',
             'shape': None,
+            'value': None,
         }
     }

@@ -51,7 +56,7 @@ class TestPadONNXOp(unittest.TestCase):
         ('pad', 'data_out')
     ]

-    def test_one_input(self):
+    def test_attribute_pad_no_infer(self):
         graph = build_graph(
             self.node_attrs,
             self.edge_attrs,
@@ -59,36 +64,45 @@ class TestPadONNXOp(unittest.TestCase):
             nodes_with_edges_only=True,
         )
         pad_node = Node(graph, 'pad')
-        Pad.infer(pad_node)
-        self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4])))
+        with self.assertRaisesRegex(AttributeError, ".*has no attribute 'infer'.*"):
+            AttributedPad.infer(pad_node)

     def test_two_inputs(self):
         graph = build_graph(
             self.node_attrs,
-            self.edge_attrs + [('data_pads', 'pad')],
+            self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')],
             nodes_with_edges_only=True,
         )
         pad_node = Node(graph, 'pad')
         Pad.infer(pad_node)
         self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4])))

-    def test_one_input_and_no_pads(self):
+    def test_not_enough_inputs(self):
         graph = build_graph(
             self.node_attrs,
-            self.edge_attrs,
+            self.edge_attrs + [('pads_begin', 'pad')],
             nodes_with_edges_only=True,
         )
         pad_node = Node(graph, 'pad')
-        with self.assertRaisesRegex(AssertionError, ".*pads attribute is missing.*"):
+        with self.assertRaisesRegex(AssertionError, ".*must have 3 or 4 inputs.*"):
             Pad.infer(pad_node)

-    def test_two_inputs_and_pads(self):
+    def test_two_inputs_value_infer(self):
+        in_value = np.random.rand(*self.node_attrs['data_in']['shape']).astype(np.float32)
         graph = build_graph(
             self.node_attrs,
-            self.edge_attrs + [('data_pads', 'pad')],
-            {'pad': {'pads': np.array([[0, 0], [0, 0], [1, 3], [2, 4]], dtype=np.int64)}},
+            self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')],
+            {'data_in': {'value': in_value}},
             nodes_with_edges_only=True,
         )
+
+        pads = np.insert(self.node_attrs['pads_end']['value'],
+                         np.arange(len(self.node_attrs['pads_begin']['value'])), self.node_attrs['pads_begin']['value'])
+        pads = np.reshape(pads, (len(self.node_attrs['pads_begin']['value']), 2))
+        ref_value = np.pad(in_value, pads, constant_values=0, mode='constant')
+
         pad_node = Node(graph, 'pad')
-        with self.assertRaisesRegex(AssertionError, ".*unexpected additional input argument.*"):
-            Pad.infer(pad_node)
+        Pad.infer(pad_node)

         self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4])))
+        self.assertTrue(np.array_equal(Node(graph, 'data_out').value, ref_value))
model-optimizer/mo/ops/pooling.py
@@ -112,12 +112,11 @@ class Pooling(Op):
             pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)

             rounding = np.floor
-            if node.has_valid('pooling_convention') and node.pooling_convention == 'full':
+            if node.soft_get('pooling_convention') == 'full' or node.soft_get('rounding_type') == 'ceil':
                 rounding = np.ceil
             output_spatial_shape = np.array(rounding(
                 np.array(input_spatial_shape + pad_spatial_shape - window_spatial_shape,
-                         dtype=np.float) / stride_spatial),
-                dtype=np.int64) + 1
+                         dtype=np.float) / stride_spatial), dtype=np.int64) + 1

             original_pads = np.array([i[1] for i in node.pad_spatial_shape])
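A worked sketch of the rounding change (illustrative values; pad_spatial_shape is the per-axis sum of begin and end pads): floor keeps every window fully inside the input, while ceil, now also triggered by rounding_type == 'ceil', counts a partial window at the border.

    import numpy as np

    input_spatial_shape = np.array([112, 112])
    window_spatial_shape = np.array([3, 3])
    stride_spatial = np.array([2, 2])
    pad_spatial_shape = np.array([0, 0])

    for rounding in (np.floor, np.ceil):
        out = np.array(rounding(
            np.array(input_spatial_shape + pad_spatial_shape - window_spatial_shape,
                     dtype=np.float64) / stride_spatial), dtype=np.int64) + 1
        print(rounding.__name__, out)
    # floor -> [55 55]  (default)
    # ceil  -> [56 56]  (pooling_convention == 'full' or rounding_type == 'ceil')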
model-optimizer/mo/ops/space_to_batch.py (new file, +107)
@@ -0,0 +1,107 @@
"""
 Copyright (C) 2018-2020 Intel Corporation

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
"""

from mo.front.common.partial_infer.utils import int64_array
from mo.graph.perm_inputs import PermuteInputs
from mo.ops.op import Op
import numpy as np


class SpaceToBatch(Op):
    op = 'SpaceToBatch'
    enabled = False

    def __init__(self, graph, attrs: dict):
        super().__init__(graph, {
            'kind': 'op',
            'op': self.op,
            'type': self.op,
            'in_ports_count': 4,
            'out_ports_count': 1,
            'version': 'opset2',
            'infer': __class__.infer,
        }, attrs)

    @staticmethod
    def infer(node):
        """
        https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch
        """
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) != 4:
            return

        block_size = node.in_port(1).data.get_value()
        pads_begin = node.in_port(2).data.get_value()
        pads_end = node.in_port(3).data.get_value()
        if block_size is None or pads_begin is None or pads_end is None:
            return

        pads = pads_begin + input_shape + pads_end

        node.out_node().shape = int64_array([input_shape[0] * np.prod(block_size),
                                             *[int(x) for x in (pads[1:] / block_size[1:])]])

        # block_shape, pads_begin, pads_end should be permuted during the NHWC->NCHW layout change
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape')
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape')
        PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:0', 'shape')
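A worked example of the SpaceToBatch shape computation, as plain NumPy with illustrative NHWC values:

    import numpy as np

    input_shape = np.array([1, 8, 8, 3])  # N, H, W, C
    block_size = np.array([1, 2, 2, 1])   # input 1 (block_shape)
    pads_begin = np.array([0, 1, 1, 0])   # input 2
    pads_end = np.array([0, 1, 1, 0])     # input 3

    pads = pads_begin + input_shape + pads_end  # padded shape: [1, 10, 10, 3]
    out = [input_shape[0] * np.prod(block_size),
           *[int(x) for x in (pads[1:] / block_size[1:])]]
    print(out)  # -> [4, 5, 5, 3]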
class BatchToSpace(Op):
    op = 'BatchToSpace'
    enabled = False

    def __init__(self, graph, attrs: dict):
        super().__init__(graph, {
            'kind': 'op',
            'op': self.op,
            'type': self.op,
            'in_ports_count': 4,
            'out_ports_count': 1,
            'version': 'opset2',
            'infer': __class__.infer
        }, attrs)

    @staticmethod
    def infer(node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) != 4:
            return

        block_size = node.in_port(1).data.get_value()
        crops_begin = node.in_port(2).data.get_value()
        crops_end = node.in_port(3).data.get_value()
        if block_size is None or crops_begin is None or crops_end is None:
            return

        pads = block_size * input_shape

        sizes = pads[1:] - crops_begin[1:] - crops_end[1:]
        batch = int(input_shape[0] / np.prod(block_size))

        node.out_node().shape = int64_array([batch, *sizes])

        # block_shape, crops_begin, crops_end values should be permuted during the NHWC->NCHW layout change
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape')
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape')
        PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:0', 'shape')
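And the inverse BatchToSpace computation, round-tripping the SpaceToBatch example above:

    import numpy as np

    input_shape = np.array([4, 5, 5, 3])
    block_size = np.array([1, 2, 2, 1])
    crops_begin = np.array([0, 1, 1, 0])
    crops_end = np.array([0, 1, 1, 0])

    pads = block_size * input_shape                     # expanded shape: [4, 10, 10, 3]
    sizes = pads[1:] - crops_begin[1:] - crops_end[1:]  # -> [8, 8, 3]
    batch = int(input_shape[0] / np.prod(block_size))   # -> 1
    print([batch, *sizes])  # -> [1, 8, 8, 3]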