[MO] Fix many spelling and grammar errors (#2543)
This commit is contained in:
parent af661ae0fe
commit 0e502ffb35
@@ -86,7 +86,7 @@ class TensorFlowYOLOV1V2Analysis(AnalyzeAction):
 "\t--input_model <path_to_model>/<model_name>.pb\n" \
 "\t--batch 1\n" \
 "\t--tensorflow_use_custom_operations_config <OPENVINO_INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf/<yolo_config>.json\n" \
-"All detailed information about conversion of this model can be fount at\n" \
+"All detailed information about conversion of this model can be found at\n" \
 "https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html"
 return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message
 else:
@@ -113,7 +113,7 @@ class TensorFlowYOLOV3Analysis(AnalyzeAction):
 "\t--input_model <path_to_model>/yolo_v3.pb\n" \
 "\t--batch 1\n" \
 "\t--tensorflow_use_custom_operations_config <OPENVINO_INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf/yolo_v3.json\n" \
-"Detailed information about conversion of this model can be fount at\n" \
+"Detailed information about conversion of this model can be found at\n" \
 "https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html"
 return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message
 else:
@@ -138,7 +138,7 @@ class ShuffleChannelFusion(BackReplacementPattern):
 We are able to perform the fusion if the pattern satisfies the conditions:
 1. Pattern input 4D shape is the same as pattern output 4D shape
 2. First Reshape splits channel dimension (1 axis) into two dimensions
-3. Transpose permutes only splitted dimensions
+3. Transpose permutes only split dimensions
 4. Second Reshape pack them back

 Fixes original models reshape-ability (Smart reshape)
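For reference, the four conditions above describe the standard channel-shuffle chain. A minimal NumPy sketch of that chain, with hypothetical shapes and group count (illustration only, not part of the commit):

```python
import numpy as np

n, c, h, w = 1, 8, 4, 4   # 4D input; condition 1: output has the same 4D shape
group = 2                 # condition 2: first Reshape splits C into [group, c // group]

x = np.arange(n * c * h * w, dtype=np.float32).reshape(n, c, h, w)
y = x.reshape(n, group, c // group, h, w)   # split the channel dimension (axis 1)
y = y.transpose(0, 2, 1, 3, 4)              # condition 3: permute only the split dimensions
y = y.reshape(n, c, h, w)                   # condition 4: second Reshape packs them back

assert y.shape == x.shape
```

ShuffleChannelFusion presumably collapses such a Reshape -> Transpose -> Reshape chain into a single ShuffleChannels operation, which is what keeps the model reshape-able ("Smart reshape" above).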
@@ -208,7 +208,7 @@ class DepthToSpaceFusion(BackReplacementPattern):
 We are able to perform the fusion if the pattern satisfies the conditions:
 1. Pattern has 6D input and 4D output
 2. First Reshape splits channel dimension (1 axis) into three dimensions [new_depth, block_size, block_size]
-3. Transpose permutes splitted dimensions with spatial ones
+3. Transpose permutes split dimensions with spatial ones
 4. Second Reshape pack block size together with spatial dimension

 Fixes original models reshape-ability (Smart reshape)
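Similarly, a minimal NumPy sketch of the chain that DepthToSpaceFusion matches, with hypothetical shapes, block size, and axis order (illustration only, not part of the commit):

```python
import numpy as np

n, c, h, w = 1, 8, 2, 2
block = 2
new_depth = c // (block * block)

x = np.arange(n * c * h * w, dtype=np.float32).reshape(n, c, h, w)
y = x.reshape(n, new_depth, block, block, h, w)    # 6D: C split into [new_depth, block, block]
y = y.transpose(0, 1, 4, 2, 5, 3)                  # interleave the split dims with spatial ones
y = y.reshape(n, new_depth, h * block, w * block)  # 4D output: block size packed into H and W

assert y.shape == (n, new_depth, h * block, w * block)
```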
@@ -80,7 +80,7 @@ class CreateConstNodesReplacement(BackReplacementPattern):
 def replace_pattern(self, graph: Graph, match: dict):
 """
 Adds layers with type 'Const' that produce blob from 'bin' file. The pass finds data nodes with one output which
-doesn't have edge with 'bin' attribute (or with two outputs and at least one output havent 'bin' attr)
+doesn't have edge with 'bin' attribute (or with two outputs and at least one output doesn't have 'bin' attr)
 and generate Const op node before the node and data node before the Const node. The data node before 'Const'
 node is needed because the op node dumps input tensors to bin file.
 """
@@ -88,7 +88,7 @@ class MemoryOffsetAdjustment(FrontReplacementSubgraph):
 \ |
  \ |
   Concat
-In Left branch we have MemoryOffset with k > 0 so we wait until kth frame will be calcualted. In right branch
+In Left branch we have MemoryOffset with k > 0 so we wait until kth frame will be calculated. In right branch
 we have no such offsets. As result we Concat (or use in any calculations with more than 1 input) kth frame from
 left branch and 0th from right branch. So we need to add synchronization before Concat node. it can be done with
 MemoryOffset(k) inserted before Concat.
@@ -29,7 +29,7 @@ from mo.ops.const import Const
 class SsdPatternAnchorReshape(FrontReplacementSubgraph):
 """
 Find ssd anchors and setup variants values.
-Need to provide compatibility wit IE DetectionOutpyt layer.
+Need to provide compatibility with IE DetectionOutput layer.
 """
 enabled = True
 graph_condition = [lambda graph: graph.graph['fw'] == 'mxnet' and graph.graph['cmd_params'].enable_ssd_gluoncv]
@@ -52,7 +52,7 @@ class UpsampleFrontExtractor(FrontExtractorOp):
 )
 if math.fabs(scales[0] - 1) > 1e-5 or math.fabs(scales[1] - 1) > 1e-5:
 raise Error(
-'Upsampling of batch and feature dimentions is not supported for node {}.',
+'Upsampling of batch and feature dimensions is not supported for node {}.',
 node.name
 )
 height_scale = scales[2]
@@ -73,7 +73,7 @@ class UpsampleONNXExtractorTest(BaseExtractorsTestingClass):
 def test_invalid_scales(self):
 inp, ref = self._base_attrs()
 inp['scales'] = [1.5, 1.5, 2.0, 2.0]
-with self.assertRaisesRegex(Error, '.*Upsampling of batch and feature dimentions is not supported for node.*'):
+with self.assertRaisesRegex(Error, '.*Upsampling of batch and feature dimensions is not supported for node.*'):
 out = self._extract(inp)

 def test_invalid_2D_scales(self):
@@ -37,7 +37,7 @@ def is_value_is_constant(val: np.ndarray, const: [int, float]):
 class FlattenToReshapeableReshape(FrontReplacementSubgraph):
 """
 The TensorFlow implementation of the Flatten operation is not reshape-able because the batch size is hardcoded
-during te constant propagation. This transform sets the 'dim' attribute for the Reshape to [0, -1].
+during the constant propagation. This transform sets the 'dim' attribute for the Reshape to [0, -1].
 """
 enabled = True

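As a rough illustration of the fix this docstring describes (hypothetical shapes, not the MO transform itself): a Reshape 'dim' of [0, -1] keeps the batch size symbolic instead of freezing it as a constant, since 0 means "copy the corresponding input dimension" and -1 means "infer the remaining size":

```python
import numpy as np

# Hypothetical NumPy analogue of a Flatten expressed as Reshape(dim=[0, -1]):
# the batch size is read from the input at runtime rather than hardcoded.
x = np.zeros((4, 3, 7, 7), dtype=np.float32)
flattened = x.reshape(x.shape[0], -1)   # equivalent of dim=[0, -1]
assert flattened.shape == (4, 3 * 7 * 7)
```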
@@ -46,7 +46,7 @@ class BinarizeWeightsM1P1(MiddleReplacementPattern):
 transparent.

 #TODO Describe how to apply multiplication at output ports -- this is not specified. In the current definition
-we can pass through only scalar multiplication, but we already requre passing it channel-wise.
+we can pass through only scalar multiplication, but we already require passing it channel-wise.
 """
 enabled = True

@@ -110,7 +110,7 @@ class BlockLSTMtoLSTMSequence(MiddleReplacementPattern):
 concatenated cell states over the whole time sequence -> last cell state

 BlockLSTM
-|| out 1 (concatenated cell states comming out of BlockLSTM)
+|| out 1 (concatenated cell states coming out of BlockLSTM)
 \/ in 1
 ConcatV2
 || (concatenation with initial state or another unused data)
@@ -265,10 +265,10 @@ class BlockLSTMtoLSTMSequence(MiddleReplacementPattern):
 list_of_concatenated_hidden_states_children_node_ids.append(node.id)

 if len(list_of_concatenated_hidden_states_children_node_ids) != 1:
-return # not supported placement of patten
+return # not supported placement of pattern
 conacenated_child_node_id = list_of_concatenated_hidden_states_children_node_ids[0]
 if conacenated_child_node_id != match['after_mul_op_to_the_rest_of_model'].id:
-return # not supported placement of patten
+return # not supported placement of pattern

 gather_indexes = match['gather_0'].in_node(1).value
 if len(gather_indexes) == 1:
@@ -24,7 +24,7 @@ from mo.ops.op import Op
 class ONNXRNNSequenceNormalize(MiddleReplacementPattern):
 """
 Convert blobs and shapes of ONNX-like LSTM, GRU, RNN cells to common form (internal for MO).
-After this normalization pass passes for spliting bidirectional calls and
+After this normalization pass passes for splitting bidirectional calls and
 multilayer cells will be applied.

 This transformation pass involves weights and shapes processing only:
@@ -142,7 +142,7 @@ class ClampQuantizeMark(MiddleReplacementPattern):
 return
 max_value = quantize.in_port(2).data.get_value()
 if max_value is None:
-log.debug('ReluQuantizeFuse: cannot fuse because FakeQuantize op has dynamic input on the 2st port, '
+log.debug('ReluQuantizeFuse: cannot fuse because FakeQuantize op has dynamic input on the 2nd port, '
 'levels=`{}`'.format(quantize.levels))
 return
 if np.all(min_value >= clamp_min) and np.all(max_value <= clamp_max):
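The condition on the last context line above is the heart of the fusion; a small numeric sketch with hypothetical values of when the Clamp becomes redundant:

```python
import numpy as np

# Hypothetical values: if the FakeQuantize input range already lies inside the
# Clamp range, clamping changes nothing and the Clamp node can be dropped.
clamp_min, clamp_max = 0.0, 6.0        # e.g. a ReLU6-style clamp
min_value = np.array([0.0])            # FakeQuantize input_low (port 1)
max_value = np.array([3.5])            # FakeQuantize input_high (port 2)

can_fuse = np.all(min_value >= clamp_min) and np.all(max_value <= clamp_max)
assert can_fuse
```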
@@ -144,7 +144,7 @@ class SmartInputMatcher(MiddleReplacementPattern):
 if shape['kind'] == 'op' and shape['op'] == 'Const':
 start = 0
 end = shape.value[0]
-log.warning("You network cannot be reshaped since shapes of placeholders is a contants."
+log.warning("Your network cannot be reshaped since shapes of placeholders are constants."
 "Please, provide non-constant shapes. ")

 # Create input node with params
@@ -186,7 +186,7 @@ class TransposeTensorIteratorLSTM(MiddleReplacementPattern):
 assert len(data_output_port) == 1
 data_input_port = data_input_port[0]
 data_output_port = data_output_port[0]
-# Verify that they are really connected to Transpose layers (guarantied by port numbers of TI, see the pattern)
+# Verify that they are really connected to Transpose layers (guaranteed by port numbers of TI, see the pattern)
 assert ti.in_edge(0)['external_port_id'] == ti.input_port_map[data_input_port]['external_port_id']
 assert ti.out_edge(0)['external_port_id'] == ti.output_port_map[data_output_port]['external_port_id']

@@ -101,7 +101,7 @@ class ReverseTensorIteratorLSTM(MiddleReplacementPattern):

 if not self.is_fusable_reverse_sequence(direct_reverse) or \
 not self.is_fusable_reverse_sequence(inverse_reverse):
-# we can not merge ReverseSequence with ot equal sequences
+# we can not merge ReverseSequence without equal sequences
 return

 # Modify stride in TI
@@ -66,7 +66,7 @@ class FakeQuantize(Op):
 inputs = [node.in_node(i) for i in range(5)]
 x, input_low, input_high, output_low, output_high = inputs
 assert x.has_valid('shape')
-# TODO Check all input[1..4] shapes are broadcastable to intput[0] shape
+# TODO Check all inputs[1..4] shapes are broadcastable to inputs[0] shape
 assert all([broadcastable(inputs[i].shape, inputs[0].shape) for i in range(1, 5)]), \
 "Not all shapes from FakeQuantize inputs can be broadcasted to input[0] for node {}".format(
 node.soft_get('name'))
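The `broadcastable` helper referenced in the assert is not part of this hunk; a hypothetical stand-in illustrating the usual NumPy-style rule (each right-aligned dimension pair must be equal or contain a 1):

```python
# Hypothetical stand-in for the broadcastable() helper used above; the real MO
# implementation may be stricter (e.g. one-directional broadcasting to inputs[0]).
def shapes_broadcastable(shape_a, shape_b) -> bool:
    for a, b in zip(reversed(shape_a), reversed(shape_b)):
        if a != b and a != 1 and b != 1:
            return False
    return True

assert shapes_broadcastable((1, 3, 1, 1), (1, 3, 224, 224))        # per-channel ranges
assert not shapes_broadcastable((2, 3, 224, 224), (1, 4, 224, 224))
```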
@@ -61,6 +61,6 @@ class SparseReshape(Op):
 output_indices_shape = np.concatenate((input_indices_shape[0:1], new_shape_shape))
 node.out_port(0).data.set_shape(output_indices_shape)

-# TODO: implement constant value propogation for common case
+# TODO: implement constant value propagation for common case
 if np.array_equal(input_shape_value, output_shape_value) and input_indices_value is not None:
 node.out_port(0).data.set_value(input_indices_value)
@@ -28,7 +28,7 @@ class SparseSegmentSqrtN(Op):
 - [0, required] Data tensor from which rows are selected for the sum divided by sqrt of N (ND),
 - [1, required] Tensor of indices of selected rows from the first input tensor along 0 dimension (1D),
 - [2, required] Tensor of segment IDs to which selected rows belong.
-Selected rows beloging to the same segment are summed up. The tensor has the same size as the second input.
+Selected rows belonging to the same segment are summed up. The tensor has the same size as the second input.
 Values must be sorted and can be repeated. (1D).

 One output:
@@ -28,7 +28,7 @@ class SparseSegmentSum(Op):
 - [0, required] Data tensor from which rows are selected for the sum (ND),
 - [1, required] Tensor of indices of selected rows from the first input tensor along 0 dimension (1D),
 - [2, required] Tensor of segment IDs to which selected rows for the sum belong.
-Selected rows beloging to the same segment are summed up. The tensor has the same size as the second input.
+Selected rows belonging to the same segment are summed up. The tensor has the same size as the second input.
 Values must be sorted and can be repeated. (1D).

 One output:
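Both SparseSegment ops above follow the same input scheme; a dense NumPy sketch with hypothetical inputs (SparseSegmentSqrtN would additionally divide each segment sum by the square root of the number of summed rows):

```python
import numpy as np

# Hypothetical inputs mirroring the three tensors described in the docstrings above.
data = np.array([[1., 2.], [3., 4.], [5., 6.]])   # [0] data tensor (rows to select)
indices = np.array([0, 1, 2])                     # [1] indices of selected rows
segment_ids = np.array([0, 0, 1])                 # [2] segment IDs, sorted, may repeat

out = np.zeros((segment_ids.max() + 1, data.shape[1]))
np.add.at(out, segment_ids, data[indices])        # rows of the same segment are summed up
# out -> [[4., 6.], [5., 6.]]
```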
@@ -110,10 +110,10 @@ class VariadicSplitBase(Op):
 # value propagation
 input_value = node.in_port(0).data.get_value()
 if input_value is not None:
-splitted = np.split(input_value, idxs[:-1], axis)
+split = np.split(input_value, idxs[:-1], axis)
 for i, port in node.out_ports().items():
 if not port.disconnected():
-port.data.set_value(splitted[i])
+port.data.set_value(split[i])

 if op == 'VariadicSplit':
 PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis')
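For context, the value propagation above relies on `np.split` taking cumulative cut points rather than chunk lengths, hence the `idxs[:-1]` slicing; a small sketch with hypothetical split lengths:

```python
import numpy as np

value = np.arange(6)
split_lengths = np.array([2, 3, 1])        # per-output lengths of a VariadicSplit
idxs = np.cumsum(split_lengths)            # [2, 5, 6]
chunks = np.split(value, idxs[:-1], 0)     # [array([0, 1]), array([2, 3, 4]), array([5])]
assert [len(c) for c in chunks] == list(split_lengths)
```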
@@ -213,7 +213,7 @@ class TensorIterator(Op):
 internal_node = node_map[internal_node_id]

 if internal_node.soft_get('type') != 'Result':
-# this output wont get out of the body, but it is still Result and needed on non first iterations of TI
+# this output won't get out of the body, but it is still Result and needed on non first iterations of TI
 assert 'from_port' in record
 out_port = TensorIterator.special_port_to_real_port(internal_node, record['from_port'], 'out')
 assert out_port in internal_node.out_ports() and not internal_node.out_port(out_port).disconnected()
@@ -300,7 +300,7 @@ class TensorIterator(Op):
 def generate_port_map(node: Node, src_port_map):
 """ Extract port_map attributes from node and node.body attributes.

-It iterates over src_port_map and substitude external_port_id, internal_port_id and
+It iterates over src_port_map and substitute external_port_id, internal_port_id and
 internal_layer_id by real values queried from node ports and node.body attributes.
 """
 result_list = []
@@ -38,7 +38,7 @@ class Unique(Op):
 and sorted in the same order as in the input (1D)
 - [1, optional] tensor of indices for each value of the input
 in the tensor of unique elements (1D)
-- [2, optional] tensor with a number of occurences for each unique element
+- [2, optional] tensor with a number of occurrences for each unique element
 in the input (1D)
 '''
 op = 'Unique'
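For comparison, NumPy exposes the same three outputs, though `np.unique` sorts the unique values whereas the Unique op above keeps them in input order (hypothetical data, illustration only):

```python
import numpy as np

x = np.array([5, 3, 5, 7, 3])
unique, indices, counts = np.unique(x, return_inverse=True, return_counts=True)
# unique  -> [3 5 7]        unique elements (sorted here, input-ordered in the MO op)
# indices -> [1 0 1 2 0]    index of each input value within `unique`
# counts  -> [2 2 1]        number of occurrences of each unique element
```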
@@ -36,7 +36,7 @@ def serialize_constants(graph: Graph, bin_file_name:str, data_type=np.float32):
 Args:
 @graph: input graph with op and data nodes
 @bin_file_name: path to file to write blobs to
-@data_type: numpy data type to convert all blob elemnts to
+@data_type: numpy data type to convert all blob elements to

 """
 bin_hashes = {}
@@ -392,7 +392,7 @@ def generate_ie_ir(graph: Graph, file_name: str, input_names: tuple = (), mean_o
 mean_size: tuple = (), meta_info: dict = dict()):
 """
 Extracts IE/IR attributes from kind='op' nodes in three ways:
-(1) node.IE xml scheme that set correspondance from existing attributes to generated xml elements
+(1) node.IE xml scheme that sets correspondence from existing attributes to generated xml elements
 (2) input/output edges that don't have 'bin' attributes are transformed to input/output ports
 (3) input edges that has 'bin' attributes are handled in special way like weights/biases

@@ -99,12 +99,12 @@ def load_caffe_proto_model(caffe_pb2, proto_path: str, model_path: [str, None] =
 from google.protobuf.pyext import cpp_message
 # Check os windows and env variable PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION
 if os.name == 'nt' and os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', default='') != 'cpp':
-# 2. cpp implementaion is available but not used
+# 2. cpp implementation is available but not used
 message += 'However, cpp implementation is available, you can boost ' \
 'model conversion by setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION env variable to cpp. \n' \
 'Run: set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp \n'
 except ImportError:
-# 3. cpp implementaion is not available
+# 3. cpp implementation is not available
 message += 'However you can use the C++ protobuf implementation that is supplied with the OpenVINO toolkit' \
 'or build protobuf library from sources. \n' \
 'Navigate to "install_prerequisites" folder and run: ' \
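A minimal sketch of the environment check this message is about (hypothetical standalone snippet; note the variable must be set before protobuf is imported to take effect):

```python
import os

# The C++ protobuf backend speeds up parsing of large Caffe models, as the diff notes.
impl = os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', '')
if os.name == 'nt' and impl != 'cpp':
    print('Tip: run "set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp" before model conversion')
```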
@@ -59,7 +59,7 @@ def concat_infer(node):

 node.out_node(0).shape = shape
 if len(shape) != 4:
-# exclude it from NHWC to NCHW convertion
+# exclude it from NHWC to NCHW conversion
 if 'axis' in node.dim_attrs:
 node.dim_attrs.remove('axis')

@@ -105,7 +105,7 @@ class Node:
 # no handling of control flow edges -- TODO
 control_flow = False
 if not skip_if_absent and idx not in self.in_ports(control_flow=control_flow):
-raise Error("Input port with index {} does't exist in node {}.".format(idx, self.soft_get('name')))
+raise Error("Input port with index {} doesn't exist in node {}.".format(idx, self.soft_get('name')))
 if not self.in_port(idx).disconnected():
 self.in_port(idx).disconnect()
 del self._in_ports[idx]
@@ -117,7 +117,7 @@ def muladd_to_scaleshift_action(graph: Graph, match: dict):
 weights.shape = np.array(weights.value.shape, dtype=np.int64)

 if bias.shape != weights.shape:
-log.warning('Mul->Add to ScaleShift conversion stoped {} != {}'.format(weights.shape, bias.shape))
+log.warning('Mul->Add to ScaleShift conversion stopped {} != {}'.format(weights.shape, bias.shape))
 return

 if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size:
@@ -46,7 +46,7 @@ def concat_convolutions(graph: Graph, start_node: Node, last_node: Node):
 channel_dim = gconv.channel_dims[0]
 split_axis = start_node.in_port(1).data.get_value()
 if channel_dim != split_axis or channel_dim != last_node.axis:
-log.debug('Grouped convolutions fusion : split or concat has wierd axis!')
+log.debug('Grouped convolutions fusion : split or concat has weird axis!')
 return False

 # Check that all convolutions has the same parameters
@@ -23,7 +23,7 @@ from mo.middle.pattern_match import apply_pattern

 def _convert_to_leaky_relu_action(graph: Graph, matches: dict):
 """
-This function checks given patten and if pattern satisfies all requirements, converts to ReLU with negative slope
+This function checks given pattern and if pattern satisfies all requirements, converts to ReLU with negative slope
 """
 mul_op = matches['mul_op']
 mul_value_data = matches['const_data']
@@ -111,7 +111,7 @@ def check_node_usages_out_of_match(match: dict, node_name_in_match_group: str):


 def node_match(data1: dict, data2: dict):
-# We have to skip _in_ports/_out_ports attributes for comparision as they are not comparable
+# We have to skip _in_ports/_out_ports attributes for comparison as they are not comparable
 return dict_includes(data1, data2, skip_attr_names=['_in_ports', '_out_ports'])


@@ -114,7 +114,7 @@ class Convolution(Op):
 weights_index = node.weights_index if node.has_valid('weights_index') else 1

 # Reshape weights kernel to original shape
-# In case of caffe ot MXNet framework, values for weights has no structed shape like OIHW
+# In case of caffe or MXNet framework, values for weights have no structured shape like OIHW
 # so we have to reshape weights to normal shape
 # For this case, Convolution node should have attribute reshape_kernel = True
 if node.has_valid('reshape_kernel') and node.reshape_kernel:
@@ -40,7 +40,7 @@ class MemoryOffset(Op):

 @staticmethod
 def infer(node: Node):
-# MemoryOffset is splitted in 2 parts to avoid cycle in graph
+# MemoryOffset is split into 2 parts to avoid cycle in graph
 # Calculate shape from shape of previous layer where possible
 # In other cases information about shapes from initial Kaldi model used
 if not node.in_port(0).disconnected():
@@ -110,7 +110,7 @@ def collect_sub_graphs(graph: Graph):
 def relabel_nodes_inplace_safe(graph: Graph, new_labels: dict):
 """ Safely relabels graph in-place without graph copy.

-Safety in this place means that it is guarantied that
+Safety in this place means that it is guaranteed that
 there won't be collisions during relabeling process.
 """
 # Relabel nodes in two stages
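A generic networkx illustration of why the two-stage relabeling mentioned above avoids collisions (hypothetical labels, not the MO implementation): routing every node through a temporary unique label first makes a swap such as {1: 2, 2: 1} safe to apply in place:

```python
import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3)])
new_labels = {1: 2, 2: 1, 3: 3}   # direct in-place relabeling would collide on 1 <-> 2

# Stage 1: move every node to a temporary label that cannot clash with anything.
nx.relabel_nodes(g, {n: ('tmp', n) for n in g.nodes()}, copy=False)
# Stage 2: move temporary labels to their final values.
nx.relabel_nodes(g, {('tmp', n): lbl for n, lbl in new_labels.items()}, copy=False)

assert set(g.nodes()) == {1, 2, 3}
assert set(g.edges()) == {(2, 1), (1, 3)}
```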
@@ -49,7 +49,7 @@ class TestFunction(unittest.TestCase):
 test_data = test_data
 self.assertEqual(IREngine._IREngine__isfloat(test_data), result,
 "Function __isfloat is not working with value: {}".format(test_data))
-log.info('Test for function __is_float passed wit value: {}, expected result: {}'.format(test_data, result))
+log.info('Test for function __is_float passed with value: {}, expected result: {}'.format(test_data, result))

 # TODO add comparison not for type IREngine
 def test_compare(self):
@@ -63,7 +63,7 @@ class TestFunction(unittest.TestCase):
 # Check function:
 flag, msg = self.IR.compare(self.IR_negative)
 self.assertFalse(flag, 'Comparing flag failed, test compare function failed')
-self.assertEqual('\n'.join(msg), reference_msg, 'Comparing message failes, test compare negative failed')
+self.assertEqual('\n'.join(msg), reference_msg, 'Comparing message failed, test compare negative failed')

 log.info('Test for function compare passed')

@@ -125,7 +125,7 @@ class TestFunction(unittest.TestCase):
 ti_nodes = IR.graph.get_op_nodes(type='TensorIterator')
 for ti in ti_nodes:
 if not ti.has_valid('body'):
-log.error('TensorIterator has not body attrubite for node: {}'.format(ti.name))
+log.error("TensorIterator doesn't have body attribute for node: {}".format(ti.name))
 else:
 const_ti_nodes = ti.body.graph.get_op_nodes(type='Const')
 for node in const_ti_nodes:
@@ -41,7 +41,7 @@ from mo.utils.class_registration import update_registration
 from mo.utils.import_extensions import import_by_path
 from mo.utils.ir_reader.extender import Extender

-# Operations not registred in collect_ops() function
+# Operations not registered in collect_ops() function
 custom_ops = {
 'AvgPool': Pooling,
 'BiasAdd': BiasAdd,
@@ -272,7 +272,7 @@ def copy_graph_with_ops(graph: Graph) -> Graph:
 """
 Function to copy graph and apply extenders to appropriate nodes
 :param graph: Graph to copy
-:return:Copied graph with applyed extenders
+:return:Copied graph with applied extenders
 """
 new_graph = Graph()
 new_graph.stage = 'back'