Enable new FP16 and support mixed precision by MO (#8514)

* Enable new FP16 format and support mixed precision

* Apply review comments

* Fix issue with fp64 in FakeQuantWithMinMaxVars.py

* Enable decompression converts fusing for CPU plugin

* Apply review feedback

* Fix code style

* Fix issue with np.full and apply review feedback

* Apply review feedback

* Fix HardSigmoid onnx extractor

* Replace np.arrays that were skipped with mo_array

* Fix compress_quantized_weights_test.py

* Fix import issues

* Apply review feedback and fix type of fusing linops in MO

* Apply review feedback

* Fix types for Mean/Scales and MXNET zeros

* Add RandomUniform_8 to ConvertPrecision

* Fix merge issue

* Fix consts names collision in GPU plugin
This commit is contained in:
Maxim Vafin
2021-12-24 14:00:37 +03:00
committed by GitHub
parent 43c45d3065
commit 3f35e2a321
200 changed files with 999 additions and 779 deletions

View File

@@ -112,7 +112,9 @@ bool ngraph::pass::CommonOptimizations::run_on_model(const std::shared_ptr<ngrap
// before CommonOptimization pipeline execution
manager.register_pass<ngraph::pass::MOCTransformations>(true, false);
manager.register_pass<ov::pass::ConvertCompressedOnlyToLegacy, false>();
// Enabling conversion of FP16 IR to legacy representation, each plugin have to disable it
// after support for FP16 IR is implemented
manager.register_pass<ov::pass::ConvertCompressedOnlyToLegacy>();
// TODO: move to KMB
manager.register_pass<ngraph::pass::WeightsDequantizeToFakeQuantize>();

View File

@@ -73,6 +73,7 @@ ov::pass::CompressFloatConstantsImpl::CompressFloatConstantsImpl() {
}
auto convert = std::make_shared<ov::opset8::Convert>(new_const, const_node->get_element_type());
new_const->set_friendly_name(const_node->get_friendly_name() + "_compressed");
convert->set_friendly_name(const_node->get_friendly_name());
ngraph::copy_runtime_info(const_node, convert);
ov::mark_as_decompression(convert);

View File

@@ -24,6 +24,7 @@ bool fuse_type_to_constant(const std::shared_ptr<ngraph::Node>& node,
const std::vector<ngraph::Input<ngraph::Node>>& consumers);
bool fuse_type_to_shapeof(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_shapeof_v0(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_random_uniform_v8(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_range_v4(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_parameter(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_convert(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to, size_t idx);
@@ -314,20 +315,20 @@ bool fuse_type_to_shapeof(const std::shared_ptr<ngraph::Node>& node, element::Ty
return false;
}
bool fuse_type_to_range_v4(const std::shared_ptr<ngraph::Node>& node, element::Type to, size_t idx) {
if (auto range = ov::as_type_ptr<opset4::Range>(node)) {
bool fuse_type_to_random_uniform_v8(const std::shared_ptr<ngraph::Node>& node, element::Type to, size_t idx) {
if (auto random_uniform = ov::as_type_ptr<opset8::RandomUniform>(node)) {
if (to.is_integral_number() || to.is_real()) {
range->set_output_type(to);
random_uniform->set_out_type(to);
return true;
}
}
return false;
}
bool fuse_type_to_random_uniform_v8(const std::shared_ptr<ngraph::Node>& node, element::Type to, size_t idx) {
if (auto random_uniform = ov::as_type_ptr<opset8::RandomUniform>(node)) {
bool fuse_type_to_range_v4(const std::shared_ptr<ngraph::Node>& node, element::Type to, size_t idx) {
if (auto range = ov::as_type_ptr<opset4::Range>(node)) {
if (to.is_integral_number() || to.is_real()) {
random_uniform->set_out_type(to);
range->set_output_type(to);
return true;
}
}

View File

@@ -34,6 +34,7 @@
#include <legacy/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
#include <transformations/common_optimizations/remove_concat_zero_dim_input.hpp>
#include <transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp>
#include <transformations/disable_decompression_convert_constant_folding.hpp>
#include <transformations/low_precision/disable_convert_constant_folding_on_const_path.hpp>
#include <transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp>
#include <transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp>
@@ -441,6 +442,7 @@ void CNNNetworkNGraphImpl::reshape(const std::map<std::string, ngraph::PartialSh
manager.register_pass<::ngraph::pass::ConvertMulticlassNmsToMulticlassNmsIE>();
manager.register_pass<::ngraph::pass::ConvertMatrixNmsToMatrixNmsIE>();
manager.register_pass<::ngraph::pass::DisableConvertConstantFoldingOnConstPath>();
manager.register_pass<::ov::pass::DisableDecompressionConvertConstantFolding>();
manager.register_pass<::ngraph::pass::ConstantFolding>();
// OneHotToLegacy changes output precision
manager.register_pass<::ngraph::pass::ConvertOneHotToOneHotIEMatcher>()->detect_output_type(

View File

@@ -35,6 +35,7 @@
#include <transformations/common_optimizations/nop_elimination.hpp>
#include <transformations/common_optimizations/wrap_interpolate_into_transposes.hpp>
#include <transformations/common_optimizations/transpose_sinking.hpp>
#include "transformations/common_optimizations/convert_compression_only_to_legacy.hpp"
#include <transformations/op_conversions/convert_broadcast_to_tiles.hpp>
#include <transformations/op_conversions/convert_depth_to_space.hpp>
#include <transformations/op_conversions/convert_shuffle_channels3.hpp>
@@ -77,6 +78,7 @@
#include <transformations/op_conversions/convert_reduce_to_pooling.hpp>
#include <transformations/convert_precision.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/disable_decompression_convert_constant_folding.hpp>
#include <transformations/rt_info/fused_names_attribute.hpp>
#include <transformations/op_conversions/fq_decomposition.hpp>
#include <transformations/utils/utils.hpp>
@@ -356,6 +358,11 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function>
});
// List of enabled/disabled transformations
// Allow FP16 Converts to be folded and FP16 constants to be upgraded to FP32 data type
pass_config->disable<ov::pass::DisableDecompressionConvertConstantFolding>();
pass_config->disable<ov::pass::ConvertCompressedOnlyToLegacy>();
pass_config->disable<ngraph::pass::ConvertGELU>();
pass_config->disable<ngraph::pass::ConvertShuffleChannels3>();
pass_config->disable<ngraph::pass::Gelu7Downgrade>();

View File

@@ -55,6 +55,8 @@
#include <transformations/common_optimizations/relu_fake_quantize_fusion.hpp>
#include <transformations/common_optimizations/add_fake_quantize_fusion.hpp>
#include <transformations/common_optimizations/transpose_sinking.hpp>
#include "transformations/common_optimizations/convert_compression_only_to_legacy.hpp"
#include "transformations/disable_decompression_convert_constant_folding.hpp"
#include <transformations/utils/utils.hpp>
#include "transformations/remove_extra_reshapes.hpp"
@@ -716,6 +718,11 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
// UnrollTI should be the last transformation in the transformation pipeline
manager.register_pass<ngraph::pass::UnrollTensorIterator>();
const auto& pass_config = manager.get_pass_config();
// Allowing FP16 Converts to be folded and FP16 constants to upgrade to FP32 data type
pass_config->disable<ov::pass::ConvertCompressedOnlyToLegacy>();
pass_config->disable<ov::pass::DisableDecompressionConvertConstantFolding>();
pass_config->disable<ngraph::pass::FakeQuantizeMulFusion>();
pass_config->disable<ngraph::pass::FakeQuantizeReshapeFusion>();
pass_config->disable<ngraph::pass::PullTransposeThroughFQUp>();

View File

@@ -1084,6 +1084,7 @@ openvino/tools/mo/utils/telemetry_params.py
openvino/tools/mo/utils/telemetry_stub.py
openvino/tools/mo/utils/telemetry_utils.py
openvino/tools/mo/utils/tensorboard_util.py
openvino/tools/mo/utils/type_utils.py
openvino/tools/mo/utils/unsupported_ops.py
openvino/tools/mo/utils/utils.py
openvino/tools/mo/utils/version.py

View File

@@ -6,6 +6,7 @@ import numpy as np
from openvino.tools.mo.back.ReshapeMutation import ReshapeMutation
from openvino.tools.mo.back.ReverseInputChannels import ApplyReverseChannels
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.partial_infer.utils import shape_array, is_fully_defined, int64_array
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs
from openvino.tools.mo.graph.graph import Graph, Node
@@ -207,15 +208,15 @@ class DeconvolutionNormalizer(BackReplacementPattern):
shape_src = node.in_port(2).get_source()
node.in_port(2).disconnect()
ss_0 = create_op_with_const_inputs(graph, StridedSlice, {1: np.array([2], dtype=np.int32),
2: np.array([in_rank], dtype=np.int32),
3: np.array([1], dtype=np.int32)},
ss_0 = create_op_with_const_inputs(graph, StridedSlice, {1: mo_array([2], dtype=np.int32),
2: mo_array([in_rank], dtype=np.int32),
3: mo_array([1], dtype=np.int32)},
{'name': node_name + '/ss_0_port',
'begin_mask': np.array([1], dtype=np.int32),
'end_mask': np.array([0], dtype=np.int32),
'new_axis_mask': np.array([0], dtype=np.int32),
'shrink_axis_mask': np.array([0], dtype=np.int32),
'ellipsis_mask': np.array([0], dtype=np.int32)})
'begin_mask': mo_array([1], dtype=np.int32),
'end_mask': mo_array([0], dtype=np.int32),
'new_axis_mask': mo_array([0], dtype=np.int32),
'shrink_axis_mask': mo_array([0], dtype=np.int32),
'ellipsis_mask': mo_array([0], dtype=np.int32)})
shape_src.connect(ss_0.in_port(0))
ss_0.out_port(0).connect(node.in_port(2))

View File

@@ -7,6 +7,7 @@ from openvino.tools.mo.back.ForceStrictPrecision import ForceStrictPrecision
from openvino.tools.mo.ops.elementwise import Add
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.graph.graph import Graph, Node
from openvino.tools.mo.ops.const import Const
from openvino.tools.mo.ops.shape import Shape
@@ -38,7 +39,7 @@ class CropToStridedSlice(BackReplacementPattern):
@staticmethod
def list_to_ndarray(val):
return np.array(val) if np.array(val).ndim != 0 else np.array([val])
return mo_array(val) if mo_array(val).ndim != 0 else mo_array([val])
def replace_pattern(self, graph: Graph, match: [str, Node]):
node = match['crop']
@@ -75,7 +76,7 @@ class CropToStridedSlice(BackReplacementPattern):
begin = Const(graph, {'value': self.mask_normalizer(shape_rank, node_axis, node_offset),
'name': ss.name + '/begin'}).create_node()
end_values = np.array([node_offset[i] + node_dim[i] for i in range(len(node_dim))])
end_values = mo_array([node_offset[i] + node_dim[i] for i in range(len(node_dim))])
end = Const(graph, {'value': self.mask_normalizer(shape_rank, node_axis, end_values),
'name': ss.name + '/end'}).create_node()
elif node.has_valid('crop_begin') and node.has_valid('crop_end'):

View File

@@ -1,10 +1,9 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.parameter import Parameter
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.crop import Crop
from openvino.tools.mo.utils.logger import log
@@ -68,7 +67,7 @@ class CutMemoryOutput(BackReplacementPattern):
in_node_port = node.in_port(0).get_source()
node.in_port(0).disconnect()
node.out_port(0).disconnect()
crop = Crop(graph, {'name': 'Result_for_'+node_id, 'dim': np.array([1]), 'offset': np.array([0]),
'axis': np.array([0])}).create_node()
crop = Crop(graph, {'name': 'Result_for_'+node_id, 'dim': mo_array([1]), 'offset': mo_array([0]),
'axis': mo_array([0])}).create_node()
in_node_port.connect(crop.in_port(0))
crop.out_port(0).connect(out_node_port)

View File

@@ -9,6 +9,7 @@ from openvino.tools.mo.ops.interpolate import Interpolate
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.shape import Shape
@@ -71,7 +72,7 @@ class InterpolateConcat(BackReplacementPattern):
shape = Shape(graph, {'name': src.node.soft_get('name', src.node.id) + '/Shape'}).create_node()
shape.in_port(0).connect(src)
gather = create_op_with_const_inputs(graph, Gather,
{1: np.array(interp_axes, dtype=np.int32), 2: int64_array(0)},
{1: mo_array(interp_axes, dtype=np.int32), 2: int64_array(0)},
{'name': shape.name + '/Gathered'}, shape)
interpolate.in_port(1).get_connection().set_source(gather.out_port(0))
@@ -129,7 +130,7 @@ class InterpolateReshapeWA(BackReplacementPattern):
name = interpolate.soft_get('name', interpolate.id)
shape = Shape(graph, {'name': name + '/ShapeOf'}).create_node()
shape.in_port(0).connect(interpolate.in_port(0).get_source())
gather = create_op_with_const_inputs(graph, Gather, {1: np.array(axes, dtype=np.int32), 2: int64_array(0)},
gather = create_op_with_const_inputs(graph, Gather, {1: mo_array(axes, dtype=np.int32), 2: int64_array(0)},
{'name': shape.name + '/Gathered'}, shape)
multipliers = output_shape[axes] / input_shape[axes]
mul = create_op_node_with_second_input(graph, Mul, multipliers, {'name': gather.name + '/Multiplied'}, gather)

View File

@@ -1,11 +1,10 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.back.ForceStrictPrecision import ForceStrictPrecision
from openvino.tools.mo.ops.prelu import PReLU
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.graph.graph import Graph, rename_node
from openvino.tools.mo.ops.const import Const
@@ -36,7 +35,7 @@ class LeakyReLUMutation(BackReplacementPattern):
prelu = PReLU(graph, dict(name=relu_name)).create_node()
rename_node(prelu, relu_name)
const = Const(graph, dict(name=relu_name + "/weights", value=np.array([relu.negative_slope]))).create_node()
const = Const(graph, dict(name=relu_name + "/weights", value=mo_array([relu.negative_slope]))).create_node()
relu.in_port(0).get_connection().set_destination(prelu.in_port(0))
const.out_port(0).connect(prelu.in_port(1))

View File

@@ -8,6 +8,7 @@ from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.ops.normalize_l2 import NormalizeL2Op
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
from openvino.tools.mo.graph.graph import Graph, rename_node
@@ -48,11 +49,11 @@ class NormalizeToNormalizeL2(BackReplacementPattern):
# in the code below we intentionally use get_source() to get the out port. Because updating the out port will
# update the Const node 'value' and 'shape' attributes
if node.channel_shared or all(weights == weights[0]):
node.in_port(1).get_source().data.set_value(np.array([weights[0]]))
node.in_port(1).get_source().data.set_value(mo_array([weights[0]]))
else:
new_shape = np.ones((len(node.in_port(0).data.get_shape())), dtype=np.int64)
new_shape[1] = -1
node.in_port(1).get_source().data.set_value(np.array(weights).reshape(new_shape))
node.in_port(1).get_source().data.set_value(mo_array(weights).reshape(new_shape))
mul = Mul(graph, {'name': output_name}).create_node()
rename_node(mul, output_name)

View File

@@ -4,6 +4,7 @@
import numpy as np
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.graph.graph import Graph
@@ -28,11 +29,11 @@ class PackBinaryWeights(BackReplacementPattern):
weights_rounded = np.round(weights)
assert np.all(np.isclose(weights, weights_rounded))
assert len(conv.in_node(1).out_nodes()) == 1
weights_rounded = np.array(weights_rounded, dtype=np.int32) + 1 # -1 --> 0
weights_rounded = mo_array(weights_rounded, dtype=np.int32) + 1 # -1 --> 0
# Reversing element in chunks by 8 elements to pack bits correctly
# First need to pad data with necessary number of element to make the length dividable by 8
pad = (-len(weights_rounded)) % 8
weights_rounded = np.array(np.concatenate((weights_rounded, np.zeros([pad]))), dtype=np.int32)
weights_rounded = mo_array(np.concatenate((weights_rounded, np.zeros([pad]))), dtype=np.int32)
assert len(weights_rounded) % 8 == 0
weights_rounded = weights_rounded.reshape([len(weights_rounded) // 8, 8])
weights_rounded = np.flip(weights_rounded, axis=1)

View File

@@ -9,6 +9,7 @@ from openvino.tools.mo.back.ReshapeMutation import ReshapeMutation
from openvino.tools.mo.back.StridedSliceMasksNormalizer import StridedSliceMasksNormalizer
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.reshape import Reshape
@@ -42,9 +43,9 @@ class ProposalMutation(BackReplacementPattern):
'Elements with indices 4 and 5 will be ignored.'.format(node.soft_get('name', node.id)),
extra={'is_warning': True})
cropped_im_info = create_op_with_const_inputs(graph, StridedSlice, {1: np.array([0, 0], dtype=np.int32),
2: np.array([1, 3], dtype=np.int32),
3: np.array([1, 1], dtype=np.int32)},
cropped_im_info = create_op_with_const_inputs(graph, StridedSlice, {1: mo_array([0, 0], dtype=np.int32),
2: mo_array([1, 3], dtype=np.int32),
3: mo_array([1, 1], dtype=np.int32)},
{'name': 'cropped_im_info',
'begin_mask': int64_array([1, 1]),
'end_mask': int64_array([1, 1]),

View File

@@ -9,6 +9,7 @@ from openvino.tools.mo.ops.gather import Gather
from openvino.tools.mo.ops.split import Split
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.graph.graph import Node
@@ -221,7 +222,7 @@ class ReverseChannelsPropagationDown(BackReplacementPattern):
# insert ReverseChannels on weights port of Convolution
ric_to_move_to_weights = reverse_channels if group == 1 else reverse_channels.copy_node()
ric_to_move_to_weights['axis'] = np.array(channel_idx)
ric_to_move_to_weights['axis'] = mo_array(channel_idx)
src = node.in_port(1).get_connection().get_source()
node.in_port(1).get_connection().set_source(ric_to_move_to_weights.out_port(0))
src.disconnect()
@@ -235,8 +236,8 @@ class ReverseChannelsPropagationDown(BackReplacementPattern):
multiplier = int(bottom_channels / group)
new_order = np.take(np.arange(bottom_channels).reshape((group, multiplier)),
indices=reverse_channels.order, axis=0).flatten()
reverse_channels['axis'] = np.array(reverse_channels.axis.copy())
reverse_channels['order'] = np.array(new_order)
reverse_channels['axis'] = mo_array(reverse_channels.axis.copy())
reverse_channels['order'] = mo_array(new_order)
node.out_port(0).get_connection().set_source(reverse_channels.out_port(0))
node.out_port(0).disconnect()
@@ -285,7 +286,7 @@ class ReverseChannelsPropagationDown(BackReplacementPattern):
# reversing eltwise inputs where applicable
for port, axis in port_axis:
ric_copy = reverse_channels.copy_node({'axis': np.array(axis), 'order': np.array(reverse_channels.order)})
ric_copy = reverse_channels.copy_node({'axis': mo_array(axis), 'order': mo_array(reverse_channels.order)})
src = port.get_connection().get_source()
port.get_connection().set_source(ric_copy.out_port(0))
@@ -452,7 +453,7 @@ class ReverseChannelsPropagationUp(BackReplacementPattern):
copies = []
for port, axis in port_axis:
reverse_channels_copy = reverse_channels.copy_node({'axis': np.array(axis)})
reverse_channels_copy = reverse_channels.copy_node({'axis': mo_array(axis)})
src = port.get_connection().get_source()
if src.node.soft_get('type') == 'Parameter':

View File

@@ -6,11 +6,13 @@ from typing import Dict
import numpy as np
from openvino.tools.mo.ops.Cast import Cast
from openvino.tools.mo.ops.elementwise import Sub, Div, Mul, Negative, Equal
from openvino.tools.mo.ops.elementwise import Sub, Div, Mul, Equal
from openvino.tools.mo.ops.select import Select
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.graph.graph import Graph, Node
from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np, np_data_type_to_destination_type, packed_I4
from openvino.tools.mo.middle.pattern_match import apply_pattern
from openvino.tools.mo.ops.const import Const
@@ -90,7 +92,7 @@ class CompressQuantizeWeights(BackReplacementPattern):
16: (packed_I4, "signed"),
}
def pattern(self):
def pattern1(self):
return dict(
nodes=[
('const', dict(type='Const')),
@@ -103,6 +105,27 @@ class CompressQuantizeWeights(BackReplacementPattern):
]
)
def pattern2(self):
return dict(
nodes=[
('const', dict(type='Const')),
('const_d', dict()),
('convert', dict(type='Convert')),
('convert_d', dict()),
('fake_quantize', dict(type='FakeQuantize', levels=lambda x: x is not None and 2 < x <= 256)),
],
edges=[
('const', 'const_d'),
('const_d', 'convert'),
('convert', 'convert_d'),
('convert_d', 'fake_quantize', {'in': 0}),
]
)
def find_and_replace_pattern(self, graph: Graph):
apply_pattern(graph, **self.pattern1(), action=self.replace_pattern) # pylint: disable=no-member
apply_pattern(graph, **self.pattern2(), action=self.replace_pattern) # pylint: disable=no-member
@staticmethod
def quantize_data(fake_quantize: Node, dst_type: type, quantized_type: type, mode: str):
graph = fake_quantize.graph
@@ -120,8 +143,8 @@ class CompressQuantizeWeights(BackReplacementPattern):
assert mode in ["signed", "unsigned"]
i_min_value = -(levels // 2) if mode == "signed" else 0
i_min = np.array([i_min_value], dtype=dst_type)
i_max = np.array(levels + i_min - 1, dtype=dst_type)
i_min = mo_array([i_min_value], dtype=dst_type)
i_max = mo_array(levels + i_min - 1, dtype=dst_type)
assert i_max - i_min == levels - 1
out_low = Const(graph, dict(name=name + '/Copy/out_low', value=i_min)).create_node()
@@ -183,7 +206,7 @@ class CompressQuantizeWeights(BackReplacementPattern):
shift.in_port(0).connect(in_low)
shift.in_port(1).connect(descaled_output_low.out_port(0))
zero = Const(graph, {'name': name + '/zero', 'value': np.array(0, dtype=dst_type)}).create_node()
zero = Const(graph, {'name': name + '/zero', 'value': mo_array(0, dtype=dst_type)}).create_node()
scale_eq_zero = Equal(graph, {'name': name + '/scale_eq_zero'}).create_node()
scale_eq_zero.in_port(0).connect(scale.out_port(0))
scale_eq_zero.in_port(1).connect(zero.out_port(0))
@@ -209,9 +232,12 @@ class CompressQuantizeWeights(BackReplacementPattern):
def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
fake_quantize = match['fake_quantize']
dst_type = match['const'].value.dtype
if np.issubdtype(dst_type, np.floating):
dst_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)
if 'convert' in match:
dst_type = match['convert'].dst_type
match['convert']['stop_value_propagation'] = False
Cast.infer(match['convert'])
else:
dst_type = match['const'].value.dtype
quantized_type, mode = None, None
for quantization_levels in sorted(self.QUANTIZATION_MAP):

View File

@@ -6,6 +6,7 @@ import numpy as np
from openvino.tools.mo.back.ForceStrictPrecision import ForceStrictPrecision
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph, rename_nodes
from openvino.tools.mo.ops.const import Const
@@ -37,17 +38,17 @@ class PriorboxMutation(BackReplacementPattern):
assert len(node.in_ports()) == 2
begin = Const(graph, {'value': np.array([2], dtype=np.int32), 'name': name + '/ss_begin'}).create_node()
end = Const(graph, {'value': np.array([4], dtype=np.int32), 'name': name + '/ss_end'}).create_node()
stride = Const(graph, {'value': np.array([1], dtype=np.int32), 'name': name + '/ss_stride'}).create_node()
begin = Const(graph, {'value': mo_array([2], dtype=np.int32), 'name': name + '/ss_begin'}).create_node()
end = Const(graph, {'value': mo_array([4], dtype=np.int32), 'name': name + '/ss_end'}).create_node()
stride = Const(graph, {'value': mo_array([1], dtype=np.int32), 'name': name + '/ss_stride'}).create_node()
shape_0 = Shape(graph, {'name': name + '/0_port'}).create_node()
ss_0 = StridedSlice(graph, {'name': name + '/ss_0_port',
'begin_mask': np.array([1], dtype=np.int32),
'end_mask': np.array([0], dtype=np.int32),
'new_axis_mask': np.array([0], dtype=np.int32),
'shrink_axis_mask': np.array([0], dtype=np.int32),
'ellipsis_mask': np.array([0], dtype=np.int32)}).create_node()
'begin_mask': mo_array([1], dtype=np.int32),
'end_mask': mo_array([0], dtype=np.int32),
'new_axis_mask': mo_array([0], dtype=np.int32),
'shrink_axis_mask': mo_array([0], dtype=np.int32),
'ellipsis_mask': mo_array([0], dtype=np.int32)}).create_node()
shape_0.out_port(0).connect(ss_0.in_port(0))
begin.out_port(0).connect(ss_0.in_port(1))
@@ -61,11 +62,11 @@ class PriorboxMutation(BackReplacementPattern):
shape_1 = Shape(graph, {'name': name + '/1_port'}).create_node()
ss_1 = StridedSlice(graph, {'name': name + '/ss_1_port',
'begin_mask': np.array([1], dtype=np.int32),
'end_mask': np.array([0], dtype=np.int32),
'new_axis_mask': np.array([0], dtype=np.int32),
'shrink_axis_mask': np.array([0], dtype=np.int32),
'ellipsis_mask': np.array([0], dtype=np.int32)}).create_node()
'begin_mask': mo_array([1], dtype=np.int32),
'end_mask': mo_array([0], dtype=np.int32),
'new_axis_mask': mo_array([0], dtype=np.int32),
'shrink_axis_mask': mo_array([0], dtype=np.int32),
'ellipsis_mask': mo_array([0], dtype=np.int32)}).create_node()
shape_1.out_port(0).connect(ss_1.in_port(0))
begin.out_port(0).connect(ss_1.in_port(1))

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
from openvino.tools.mo.graph.graph import Graph, rename_node
@@ -24,8 +25,8 @@ class AttributedClampNormalizer(FrontReplacementPattern):
min_value = attr_clamp.soft_get('min', np.finfo(np.float32).min)
max_value = attr_clamp.soft_get('max', np.finfo(np.float32).max)
new_clamp = create_op_with_const_inputs(graph, Clamp,
{1: np.array(min_value, dtype=np.float32),
2: np.array(max_value, dtype=np.float32)},
{1: float32_array(min_value),
2: float32_array(max_value)},
{'name': original_name})
rename_node(new_clamp, original_name)

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.broadcast import Broadcast
@@ -30,7 +29,7 @@ class FillToBroadcast(FrontReplacementPattern):
assert fill_node.has_valid('fill_value')
assert fill_node.has_and_set('input_as_shape')
const = Const(graph, {'value': np.array(fill_node.fill_value), 'name': name + '/value'}).create_node()
const = Const(graph, {'value': mo_array(fill_node.fill_value), 'name': name + '/value'}).create_node()
broadcast_node = Broadcast(graph, {'name': name + '/Broadcast'}).create_node()
fill_node.in_port(0).get_connection().set_destination(broadcast_node.in_port(1))
const.out_port(0).connect(broadcast_node.in_port(0))

View File

@@ -4,9 +4,8 @@
import inspect
import logging as log
import numpy as np
from openvino.tools.mo.ops.elementwise import Mul, Add
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.const import Const
@@ -25,17 +24,17 @@ class InterpolateNormalizer(FrontReplacementOp):
if 1 not in node.in_ports() or node.in_port(1).disconnected():
if node.has_valid('factor') and not node.has_valid('width') and not node.has_valid('height'):
factor = Const(graph, {'value': np.array(node.factor)}).create_node()
factor = Const(graph, {'value': mo_array(node.factor)}).create_node()
shape = Shape(graph, {'name': node.name + '/shape'}).create_node()
begin = Const(graph, {'value': np.array([2])}).create_node()
end = Const(graph, {'value': np.array([4])}).create_node()
stride = Const(graph, {'value': np.array([1])}).create_node()
ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': np.array([1]),
'end_mask': np.array([0]), 'new_axis_mask': np.array([0]),
'shrink_axis_mask': np.array([0]),
'ellipsis_mask': np.array([0])}).create_node()
begin = Const(graph, {'value': mo_array([2])}).create_node()
end = Const(graph, {'value': mo_array([4])}).create_node()
stride = Const(graph, {'value': mo_array([1])}).create_node()
ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': mo_array([1]),
'end_mask': mo_array([0]), 'new_axis_mask': mo_array([0]),
'shrink_axis_mask': mo_array([0]),
'ellipsis_mask': mo_array([0])}).create_node()
mul = Mul(graph, {'name': node.name + '/factor_mul_'}).create_node()
@@ -55,13 +54,13 @@ class InterpolateNormalizer(FrontReplacementOp):
else:
shape = Shape(graph, {'name': node.name + '/shape'}).create_node()
begin = Const(graph, {'value': np.array([2])}).create_node()
end = Const(graph, {'value': np.array([4])}).create_node()
stride = Const(graph, {'value': np.array([1])}).create_node()
ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': np.array([1]),
'end_mask': np.array([0]), 'new_axis_mask': np.array([0]),
'shrink_axis_mask': np.array([0]),
'ellipsis_mask': np.array([0])}).create_node()
begin = Const(graph, {'value': mo_array([2])}).create_node()
end = Const(graph, {'value': mo_array([4])}).create_node()
stride = Const(graph, {'value': mo_array([1])}).create_node()
ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': mo_array([1]),
'end_mask': mo_array([0]), 'new_axis_mask': mo_array([0]),
'shrink_axis_mask': mo_array([0]),
'ellipsis_mask': mo_array([0])}).create_node()
source = node.in_port(0).get_connection().get_source()
source.connect(shape.in_port(0))
@@ -71,7 +70,7 @@ class InterpolateNormalizer(FrontReplacementOp):
stride.out_port(0).connect(ss.in_port(3))
pads_value = node.pads_begin + node.pads_end
pads_const = Const(graph, {'value': np.array(pads_value)}).create_node()
pads_const = Const(graph, {'value': mo_array(pads_value)}).create_node()
add = Add(graph, {'name': node.name + '/pad_add'}).create_node()
ss.out_port(0).connect(add.in_port(0))
add.in_port(1).connect(pads_const.out_port(0))
@@ -83,18 +82,18 @@ class InterpolateNormalizer(FrontReplacementOp):
return None
const = Const(graph, {'name': node.name + '/pre_shrink_sub_const',
'value': np.array(-1)}).create_node()
'value': mo_array(-1)}).create_node()
sub = Add(graph, {'name': node.name + '/pre_shrink_sub'}).create_node()
add.out_port(0).connect(sub.in_port(0))
sub.in_port(1).connect(const.out_port(0))
const = Const(graph, {'value': np.array(1 / shrink_factor),
const = Const(graph, {'value': mo_array(1 / shrink_factor),
'name': node.name + 'shrink_factor_div_const'}).create_node()
div = Mul(graph, {'name': node.name + 'shrink_factor_div'}).create_node()
sub.out_port(0).connect(div.in_port(0))
div.in_port(1).connect(const.out_port(0))
const = Const(graph, {'name': node.name + '/shrink_factor_add_one_const', 'value': np.array(1)
const = Const(graph, {'name': node.name + '/shrink_factor_add_one_const', 'value': mo_array(1)
}).create_node()
add = Add(graph, {'name': node.name + '/shrink_factor_add_one'}).create_node()
div.out_port(0).connect(add.in_port(0))
@@ -118,13 +117,13 @@ class InterpolateNormalizer(FrontReplacementOp):
# Commented out section represents reshape that used in deeplab-caffe
# Uncomment the following lines, if your model was trained with deeplab-caffe
# or have the same reshape method
# const = Const(graph, {'value': np.array(-1),
# const = Const(graph, {'value': mo_array(-1),
# 'name': node.name + 'zoom_factor_deeplab-caffe_sub_const'}).create_node()
# sub = Add(graph, {'name': node.name + 'zoom_factor_deeplab-caffe_sub'}).create_node()
# add.out_port(0).connect(sub.in_port(0))
# const.out_port(0).connect(sub.in_port(1))
#
# const = Const(graph, {'value': np.array(zoom_factor - 1),
# const = Const(graph, {'value': mo_array(zoom_factor - 1),
# 'name': node.name + 'zoom_factor_deeplab-caffe_mul_const'}).create_node()
# mul = Mul(graph, {'name': node.name + 'zoom_factor_deeplab-caffe_mul'}).create_node()
# sub.out_port(0).connect(mul.in_port(0))
@@ -139,7 +138,7 @@ class InterpolateNormalizer(FrontReplacementOp):
# sum.out_port(0).connect(node.in_port(1))
# Comment out the following lines if you use the reshape method from previous section
const = Const(graph, {'value': np.array(zoom_factor),
const = Const(graph, {'value': mo_array(zoom_factor),
'name': node.name + '/zoom_factor_mul_const'}).create_node()
mul = Mul(graph, {'name': node.name + '/zoom_factor_mul'}).create_node()
@@ -151,7 +150,7 @@ class InterpolateNormalizer(FrontReplacementOp):
mul.out_port(0).connect(node.in_port(1))
elif node.soft_get('width') != 0 and node.soft_get('height') != 0:
const = Const(graph, {'value': np.array([node.height, node.width])}).create_node()
const = Const(graph, {'value': mo_array([node.height, node.width])}).create_node()
node.add_input_port(1, skip_if_exist=True)
assert node.in_port(1).disconnected()
const.out_port(0).connect(node.in_port(1))
@@ -166,23 +165,23 @@ class InterpolateNormalizer(FrontReplacementOp):
log.error('Zoom factor should be positive in node {}'.format(node.id))
return None
const = Const(graph, {'value': np.array(-1)}).create_node()
const = Const(graph, {'value': mo_array(-1)}).create_node()
sub = Add(graph, {'name': node.name + '/shrink_zoom_factor_sub'}).create_node()
add.out_port(0).connect(sub.in_port(0))
const.out_port(0).connect(sub.in_port(1))
const = Const(graph, {'value': np.array(1 / (shrink_factor + 1))}).create_node()
const = Const(graph, {'value': mo_array(1 / (shrink_factor + 1))}).create_node()
div = Mul(graph, {'name': node.name + '/shrink_factor_div'}).create_node()
sub.out_port(0).connect(div.in_port(0))
const.out_port(0).connect(div.in_port(1))
const = Const(graph, {'value': np.array(-1),
const = Const(graph, {'value': mo_array(-1),
'name': node.name + 'shrink_zoom_factor_sum_const'}).create_node()
sum = Add(graph, {'name': node.name + '/shrink_zoom_factor_sum'}).create_node()
div.out_port(0).connect(sum.in_port(0))
const.out_port(0).connect(sum.in_port(1))
const = Const(graph, {'value': np.array(zoom_factor - 1)}).create_node()
const = Const(graph, {'value': mo_array(zoom_factor - 1)}).create_node()
mul = Mul(graph, {'name': node.name + '/zoom_factor_mul'}).create_node()
sum.out_port(0).connect(mul.in_port(0))
const.out_port(0).connect(mul.in_port(1))
@@ -198,13 +197,13 @@ class InterpolateNormalizer(FrontReplacementOp):
if node.soft_get('fw') == 'caffe':
shape = Shape(graph, {'name': node.name + '/shape'}).create_node()
begin = Const(graph, {'value': np.array([2])}).create_node()
end = Const(graph, {'value': np.array([4])}).create_node()
stride = Const(graph, {'value': np.array([1])}).create_node()
ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': np.array([1]),
'end_mask': np.array([0]), 'new_axis_mask': np.array([0]),
'shrink_axis_mask': np.array([0]),
'ellipsis_mask': np.array([0])}).create_node()
begin = Const(graph, {'value': mo_array([2])}).create_node()
end = Const(graph, {'value': mo_array([4])}).create_node()
stride = Const(graph, {'value': mo_array([1])}).create_node()
ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': mo_array([1]),
'end_mask': mo_array([0]), 'new_axis_mask': mo_array([0]),
'shrink_axis_mask': mo_array([0]),
'ellipsis_mask': mo_array([0])}).create_node()
source = node.in_port(1).get_connection().get_source()
node.in_port(1).disconnect()

View File

@@ -1,10 +1,9 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.interpolate import Interpolate
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
from openvino.tools.mo.graph.graph import Graph, rename_nodes
@@ -30,7 +29,7 @@ class InterpolateV1ToInterpolate(FrontReplacementPattern):
interpolate1_name = node.soft_get('name', node.id)
interpolate4 = create_op_with_const_inputs(graph, Interpolate,
{
2: np.array([1.0, 1.0]),
2: mo_array([1.0, 1.0]),
3: int64_array(node.axes)
},
{

View File

@@ -5,6 +5,7 @@ import numpy as np
from openvino.tools.mo.ops.activation_ops import Log
from openvino.tools.mo.ops.elementwise import Add
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes
from openvino.tools.mo.ops.const import Const
@@ -23,7 +24,7 @@ class Log1p(FrontReplacementOp):
const_dtype = np.float32
if node.has_valid('data_type'):
const_dtype = node.data_type
const = Const(graph, {'value': np.array([1], dtype=const_dtype)}).create_node()
const = Const(graph, {'value': mo_array([1], dtype=const_dtype)}).create_node()
add = Add(graph, {'name': node.name + '/Add_'}).create_node()
log = Log(graph, {'name': node.name + '/Log_'}).create_node()

View File

@@ -3,12 +3,11 @@
import math
import numpy as np
from openvino.tools.mo.ops.MatMul import MatMul
from openvino.tools.mo.ops.elementwise import Add, Mul
from openvino.tools.mo.ops.transpose import Transpose
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
@@ -78,7 +77,7 @@ class GemmDecomposer(FrontReplacementSubgraph):
name = node.soft_get('name', node.id)
node_output_port = node.out_port(0)
if node.has_valid('alpha') and not math.isclose(node.alpha, 1):
mul_alpha = create_op_with_const_inputs(graph, Mul, {1: np.array(node.alpha)},
mul_alpha = create_op_with_const_inputs(graph, Mul, {1: mo_array(node.alpha)},
{'name': name + '/Alpha', 'can_be_scaleshift': False})
node_output_port.get_connection().insert_node(mul_alpha)
node_output_port = mul_alpha.out_port(0)
@@ -93,7 +92,7 @@ class GemmDecomposer(FrontReplacementSubgraph):
node.in_port(2).get_connection().set_destination(bias_node.in_port(1))
node_output_port.connect(bias_node.in_port(0))
if node.has_valid('beta') and not math.isclose(node.beta, 1):
bias_node.insert_op_on_input_port(in_port_idx=1, new_op_class=Mul, value=np.array(node.beta),
bias_node.insert_op_on_input_port(in_port_idx=1, new_op_class=Mul, value=mo_array(node.beta),
new_op_attrs={'name': name + '/Beta',
'can_be_scaleshift': False})
del node['beta']

View File

@@ -1,9 +1,8 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.elementwise import Mul, Add, Pow
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.const import Const
@@ -19,21 +18,21 @@ class PowerToEltwises(FrontReplacementOp):
out_port = op.in_port(0).get_source()
if op.soft_get('scale', 1) != 1:
const = Const(graph, {'value': np.array(op.scale)}).create_node()
const = Const(graph, {'value': mo_array(op.scale)}).create_node()
mul = Mul(graph, {'name': op.name + '/mul_'}).create_node()
const.out_port(0).connect(mul.in_port(1))
out_port.connect(mul.in_port(0))
out_port = mul.out_port(0)
if op.soft_get('shift', 0) != 0:
const = Const(graph, {'value': np.array(op.shift)}).create_node()
const = Const(graph, {'value': mo_array(op.shift)}).create_node()
add = Add(graph, {'name': op.name + '/add_'}).create_node()
const.out_port(0).connect(add.in_port(1))
out_port.connect(add.in_port(0))
out_port = add.out_port(0)
if op.soft_get('power', 1) != 1:
const = Const(graph, {'value': np.array(op.power)}).create_node()
const = Const(graph, {'value': mo_array(op.power)}).create_node()
pow = Pow(graph, {'name': op.name + '/pow_'}).create_node()
const.out_port(0).connect(pow.in_port(1))
out_port.connect(pow.in_port(0))

View File

@@ -1,9 +1,8 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.elementwise import Add, Mul
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.const import Const
@@ -33,7 +32,7 @@ class BinaryFakeQuantizeNormalization(FrontReplacementPattern):
quantize = match['quantize']
sum_node = Add(graph, dict()).create_node()
const = Const(graph, {'value': np.array(0.5)}).create_node()
const = Const(graph, {'value': mo_array(0.5)}).create_node()
mul_node = Mul(graph, dict()).create_node()
mul_node.in_port(0).connect(sum_node.out_port(0))

View File

@@ -8,6 +8,7 @@ from openvino.tools.mo.ops.gather import Gather
from openvino.tools.mo.ops.range import Range
from openvino.tools.mo.ops.select import Select
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph, rename_nodes, Node
@@ -72,8 +73,8 @@ class ExpandRangeConstant(FrontReplacementSubgraph):
const.out_port(0).connect(shapeof_node.in_port(0))
range_node = create_op_with_const_inputs(graph, Range,
{0: np.array(0, dtype=value.dtype),
2: np.array(1, dtype=value.dtype)},
{0: mo_array(0, dtype=value.dtype),
2: mo_array(1, dtype=value.dtype)},
{'name': const_name + '/Range', 'dtype': value.dtype})
select_node.out_port(0).connect(range_node.in_port(1))

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.BatchNormInference import BatchNormInference
from openvino.tools.mo.front.caffe.extractors.utils import embed_input
from openvino.tools.mo.front.extractor import FrontExtractorOp
@@ -22,8 +23,8 @@ class BatchNormalizationExtractor(FrontExtractorOp):
if pb_model:
blobs = pb_model.blobs
assert len(blobs) >= 2, 'BatchNorm accepts not less then two input blobs'
mean = np.array(blobs[0].data)
variance = np.array(blobs[1].data)
mean = mo_array(blobs[0].data)
variance = mo_array(blobs[1].data)
if len(blobs) == 3:
scale = blobs[2].data[0]

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array
from openvino.tools.mo.ops.fakequantize import FakeQuantize
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.graph.graph import Node, Graph
@@ -19,7 +18,7 @@ class BinarizationToQuantize(FrontReplacementOp):
def replace_op(self, graph: Graph, node: Node):
in_node_0 = node.in_node(0)
broadcast = lambda x: np.array([x], dtype=np.float32)
broadcast = lambda x: float32_array([x])
threshold = Const(graph, {'name': node.id + "/Input_1", "value": broadcast(0)}).create_node()
in_1 = threshold
in_2 = threshold

View File

@@ -4,6 +4,7 @@
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import input_as_const
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.scale_shift import ScaleShiftOp
@@ -28,10 +29,10 @@ class BNToScaleShift(FrontReplacementOp):
if len(blobs) != 4:
raise Error("Incorrect number of blobs in BN layer {}".format(node.id))
mean = np.array(blobs[0].data)
var = np.array(blobs[1].data)
betta = np.array(blobs[2].data)
gamma = np.array(blobs[3].data)
mean = mo_array(blobs[0].data)
var = mo_array(blobs[1].data)
betta = mo_array(blobs[2].data)
gamma = mo_array(blobs[3].data)
gamma = gamma + np.repeat(param.eps, gamma.shape)

View File

@@ -1,10 +1,9 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import get_spatial_attr, get_list_from_container, weights_biases
from openvino.tools.mo.front.common.extractors.utils import layout_attrs
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.convolution import Convolution
from openvino.tools.mo.utils.error import Error
@@ -97,18 +96,18 @@ def conv_create_attrs(params):
return {
'bias_addable': True,
'bias_term': params['bias_term'],
'pad': np.array([[0, 0], [0, 0], [params['padding'][1], params['padding'][1]],
[params['padding'][0], params['padding'][0]]], dtype=np.int64),
'pad_spatial_shape': np.array([[params['padding'][1], params['padding'][1]],
[params['padding'][0], params['padding'][0]]], dtype=np.int64),
'dilation': np.array([1, 1, params['dilate'][1], params['dilate'][0]], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], [params['padding'][1], params['padding'][1]],
[params['padding'][0], params['padding'][0]]]),
'pad_spatial_shape': int64_array([[params['padding'][1], params['padding'][1]],
[params['padding'][0], params['padding'][0]]]),
'dilation': int64_array([1, 1, params['dilate'][1], params['dilate'][0]]),
'output_spatial_shape': None,
'output_shape': None,
'stride': np.array([1, 1, params['stride'][1], params['stride'][0]], dtype=np.int64),
'stride': int64_array([1, 1, params['stride'][1], params['stride'][0]]),
'group': params['group'],
'output': params['output'],
'kernel_spatial': np.array([params['kernel'][1], params['kernel'][0]], dtype=np.int64),
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64),
'kernel_spatial': int64_array([params['kernel'][1], params['kernel'][0]]),
'kernel_spatial_idx': int64_array([2, 3]),
'reshape_kernel': True,
'input_feature_channel': 1,

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.elementwise import Add, Mul, Maximum
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.caffe.extractors.utils import embed_input
@@ -53,7 +52,7 @@ class EltwiseExtractor(FrontExtractorOp):
lin_op_class = eltwise_caffe_map[operation]
mapping_rule = merge_attrs(param, {'coeff': np.array(param.coeff)})
mapping_rule = merge_attrs(param, {'coeff': mo_array(param.coeff)})
mapping_rule.update(layout_attrs())
assert len(param.coeff) <= input_len

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.eltwise_n import EltwiseNReplacement
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
@@ -25,7 +24,7 @@ class EltwiseAddNormalize(FrontReplacementPattern):
def __insert_mul_node_with_coeff(node: Node, port: int, coeff: float):
if coeff != 1:
mul_node = Mul(node.graph, {'name': node.id + '/coeff_mul'}).create_node()
const_node = Const(node.graph, {'name': node.id + '/coeff', 'value': np.array([coeff])}).create_node()
const_node = Const(node.graph, {'name': node.id + '/coeff', 'value': mo_array([coeff])}).create_node()
node.in_port(port).get_connection().insert_node(mul_node)
const_node.out_port(0).connect(mul_node.in_port(1))

View File

@@ -2,7 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.caffe.extractors.native_caffe import native_caffe_node_extractor
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.front.common.register_custom_ops import extension_op_extractor
from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp
from openvino.tools.mo.graph.graph import Node

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Node
from openvino.tools.mo.ops.const import Const
@@ -16,7 +17,7 @@ def dim_to_shape(dim):
Returns:
shape of the layer as np.array
"""
return np.array(dim, dtype=np.int64)
return int64_array(dim)
def embed_input(attrs: dict, port: int, name: str, value: np.array, bin_name: str = None):
@@ -39,7 +40,9 @@ def embed_input(attrs: dict, port: int, name: str, value: np.array, bin_name: st
# memory safe value conversion to numpy;
# previously we used `np.array(value)` and it was greedy for memory on caffe models especially
val = np.ndarray(shape=(len(value),))
# previously we always created float64 np.ndarray, now we force float32, we can't get data type from "value" for
# Caffe, because it comes as float64 from protobuf
val = np.ndarray(shape=(len(value),), dtype=np.float32)
for i, item in enumerate(value):
val[i] = item
attrs[name] = val

View File

@@ -3,14 +3,15 @@
import importlib
import logging as log
import mmap
import os
import sys
import mmap
import numpy as np
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array
from openvino.tools.mo.front.extractor import add_outputs_identity
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.utils.error import Error, FrameworkError
@@ -37,7 +38,7 @@ def parse_mean(file_path: str, in_shape: np.ndarray, mean_file_offsets: [tuple,
try:
blob.ParseFromString(data)
data = np.array(blob.data) # pylint: disable=no-member
data = mo_array(blob.data) # pylint: disable=no-member
if blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
data = data.reshape(blob.channels, blob.height, blob.width) # pylint: disable=no-member
@@ -203,7 +204,7 @@ def caffe_pb_to_nx(graph, proto, model):
# input_dim: 3
# input_dim: 500
# input_dim: 500
input_dims = [np.array(list(proto.input_dim), dtype=np.int64)]
input_dims = [int64_array(list(proto.input_dim))]
input_names = [proto.input[0]]
elif len(list(proto.input)) == 1 and len(list(proto.input_shape)):
@@ -216,7 +217,7 @@ def caffe_pb_to_nx(graph, proto, model):
# dim: 227
# dim: 227
# }
input_dims = [np.array(proto.input_shape[0].dim, dtype=np.int64)]
input_dims = [int64_array(proto.input_shape[0].dim)]
input_names = [proto.input[0]]
elif len(proto.input_shape) > 0:
@@ -236,7 +237,7 @@ def caffe_pb_to_nx(graph, proto, model):
# dim: 3
# }
for i in range(len(proto.input_shape)):
input_dims.append(np.array(proto.input_shape[i].dim, dtype=np.int64))
input_dims.append(int64_array(proto.input_shape[i].dim))
input_names.append(proto.input[i])
for i in range(len(input_names)):
@@ -283,7 +284,7 @@ def caffe_pb_to_nx(graph, proto, model):
}
"""
dims = map(int, list(filter(None, str(list(input_param.shape)[0]).split('dim:'))))
input_dims.append(np.array(list(dims), dtype=np.int64))
input_dims.append(int64_array(list(dims)))
input_names.append(layer.name)
node_id = graph.unique_id(layer.name)

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.transpose import Transpose
from openvino.tools.mo.front.extractor import FrontExtractorOp
@@ -14,5 +15,5 @@ class PermuteFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
order = node.pb.permute_param.order
Transpose.update_node_stat(node, {'order': np.array(order, dtype=np.int32)})
Transpose.update_node_stat(node, {'order': mo_array(order, dtype=np.int32)})
return cls.enabled

View File

@@ -1,10 +1,9 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import get_spatial_attr
from openvino.tools.mo.front.common.extractors.utils import layout_attrs
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.pooling import Pooling
@@ -50,10 +49,10 @@ class PoolingFrontExtractor(FrontExtractorOp):
rt = 'floor'
attrs = {
'window': np.array([1, 1, kernel[1], kernel[0]], dtype=np.int64),
'stride': np.array([1, 1, stride[1], stride[0]], dtype=np.int64),
'pad': np.array([[0, 0], [0, 0], [padding[1], padding[1]], [padding[0], padding[0]]], dtype=np.int64),
'pad_spatial_shape': np.array([[padding[1], padding[1]], [padding[0], padding[0]]], dtype=np.int64),
'window': int64_array([1, 1, kernel[1], kernel[0]]),
'stride': int64_array([1, 1, stride[1], stride[0]]),
'pad': int64_array([[0, 0], [0, 0], [padding[1], padding[1]], [padding[0], padding[0]]]),
'pad_spatial_shape': int64_array([[padding[1], padding[1]], [padding[0], padding[0]]]),
'pool_method': method,
'exclude_pad': exclude_pad,
'global_pool': global_pooling,

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.priorbox import PriorBoxOp
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.common.extractors.utils import layout_attrs
@@ -23,9 +22,9 @@ class PriorBoxFrontExtractor(FrontExtractorOp):
variance = [0.1]
update_attrs = {
'aspect_ratio': np.array(param.aspect_ratio),
'min_size': np.array(param.min_size),
'max_size': np.array(param.max_size),
'aspect_ratio': mo_array(param.aspect_ratio),
'min_size': mo_array(param.min_size),
'max_size': mo_array(param.max_size),
'flip': int(param.flip),
'clip': int(param.clip),
'variance': list(variance),
@@ -42,11 +41,11 @@ class PriorBoxFrontExtractor(FrontExtractorOp):
# so check if it is set or set to default
fields = [field[0].name for field in param.ListFields()]
if 'density' in fields:
update_attrs['density'] = np.array(param.density)
update_attrs['density'] = mo_array(param.density)
if 'fixed_size' in fields:
update_attrs['fixed_size'] = np.array(param.fixed_size)
update_attrs['fixed_size'] = mo_array(param.fixed_size)
if 'fixed_ratio' in fields:
update_attrs['fixed_ratio'] = np.array(param.fixed_ratio)
update_attrs['fixed_ratio'] = mo_array(param.fixed_ratio)
mapping_rule = merge_attrs(param, update_attrs)

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.extractor import FrontExtractorOp
@@ -20,8 +19,8 @@ class ProposalFrontExtractor(FrontExtractorOp):
'feat_stride': param.feat_stride,
'base_size': param.base_size,
'min_size': param.min_size,
'ratio': np.array(param.ratio),
'scale': np.array(param.scale),
'ratio': mo_array(param.ratio),
'scale': mo_array(param.scale),
'pre_nms_topn': param.pre_nms_topn,
'post_nms_topn': param.post_nms_topn,
'nms_thresh': param.nms_thresh

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.regionyolo import RegionYoloOp
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.common.extractors.utils import layout_attrs
@@ -28,8 +27,8 @@ class RegionYoloFrontExtractor(FrontExtractorOp):
'classes': classes,
'num': num,
'do_softmax': int(param.do_softmax),
'anchors': np.array(param.anchors),
'mask': np.array(param.mask)
'anchors': mo_array(param.anchors),
'mask': mo_array(param.mask)
}
flatten_attrs = {

View File

@@ -1,10 +1,8 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import embed_input, weights_biases
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.scale_shift import ScaleShiftOp
from openvino.tools.mo.utils.utils import NamedAttrsClass
@@ -25,8 +23,8 @@ class ScaleFrontExtractor(FrontExtractorOp):
if model is None and len(pb.bottom) == 1:
# default weights and biases for scale layer if the caffemodel file doesn't contain them
model = NamedAttrsClass({'blobs': np.array([NamedAttrsClass({'data': np.array([1])}),
NamedAttrsClass({'data': np.array([0])})])})
model = NamedAttrsClass({'blobs': mo_array([NamedAttrsClass({'data': mo_array([1])}),
NamedAttrsClass({'data': mo_array([0])})])})
# scale with 1 input and 1 or 2 blobs
if model and len(model.blobs) != 0 and len(pb.bottom) == 1:
attrs.update(weights_biases(param.bias_term, model))
@@ -34,7 +32,7 @@ class ScaleFrontExtractor(FrontExtractorOp):
elif len(pb.bottom) == 2 and param.bias_term:
if model is None or len(model.blobs) == 0:
# default bias for scale layer with 2 inputs if the caffemodel file doesn't contain them
model = NamedAttrsClass({'blobs': np.array([NamedAttrsClass({'data': np.array([0])})])})
model = NamedAttrsClass({'blobs': mo_array([NamedAttrsClass({'data': mo_array([0])})])})
embed_input(attrs, 1, 'biases', model.blobs[0].data)
ScaleShiftOp.update_node_stat(node, attrs)

View File

@@ -1,13 +1,13 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
def layout_attrs():
return {
'spatial_dims': np.array([2, 3], dtype=np.int64),
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'spatial_dims': int64_array([2, 3]),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW'
}

View File

@@ -4,10 +4,11 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value
from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array
from openvino.tools.mo.utils.error import Error
nchw_to_nhwc_permute = np.array([0, 2, 3, 1], dtype=np.int64)
nhwc_to_nchw_permute = np.array([0, 3, 1, 2], dtype=np.int64)
nchw_to_nhwc_permute = int64_array([0, 2, 3, 1])
nhwc_to_nchw_permute = int64_array([0, 3, 1, 2])
supported_layouts = ('NCHW', 'NHWC')
# the attribute 'layout' in the graph.graph can have two values only: "NCHW" or "NHWC". If the tensor has 5 dimensions
# then it is necessary to transform "NCHW" to "NCDHW" and "NHWC" to "NDHWC" respectively. The dictionary below id used
@@ -22,7 +23,7 @@ def convert_shape(shape: np.array, permute: np.array):
result = [0, 0, 0, 0]
for ind, perm_ind in enumerate(permute):
result[ind] = shape[perm_ind]
return np.array(result)
return mo_array(result)
def get_depth_dim(layout: str, shape_len: int):

View File

@@ -6,6 +6,7 @@ import os
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.find_inputs import find_inputs
@@ -104,5 +105,5 @@ def caffe_native_node_infer(node: Node):
)
for iout in range(len(node.out_nodes())):
output_shape = np.array(net.blobs[node.top].data.shape, dtype=np.int64)
output_shape = int64_array(net.blobs[node.top].data.shape)
node.out_node(iout).shape = output_shape

View File

@@ -3,9 +3,8 @@
import logging as log
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
def crop_infer(node):
@@ -31,11 +30,11 @@ def crop_infer(node):
if any(s is None for s in shapes):
return
input_shape = np.array(shapes[0])
input_shape = mo_array(shapes[0])
start_axis = get_canonical_axis_index(input_shape, node.axis)
node.axis = start_axis
reference_shape = np.array(shapes[1])
reference_shape = mo_array(shapes[1])
input_dim = input_shape.size
# set new shape to current shape

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Node
@@ -13,6 +12,6 @@ def multi_box_prior_infer_mxnet(node: Node):
num_ratios = len(node.aspect_ratio)
num_priors = len(node.min_size) + num_ratios - 1
if v10:
node.out_node(0).shape = np.array([2, data_H * data_W * num_priors * 4], dtype=np.int64)
node.out_node(0).shape = int64_array([2, data_H * data_W * num_priors * 4])
else:
node.out_node(0).shape = np.array([1, 2, data_H * data_W * num_priors * 4], dtype=np.int64)
node.out_node(0).shape = int64_array([1, 2, data_H * data_W * num_priors * 4])

View File

@@ -179,14 +179,25 @@ def int64_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.nda
return np.array(value, dtype=np.int64)
def float_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.ndarray:
return np.array(value, dtype=np.float64)
def float32_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.ndarray:
return np.array(value, dtype=np.float32)
def float_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.ndarray:
return float32_array(value)
def mo_array(value: Union[Iterable[Union[float, int]], float, int], dtype=None) -> np.ndarray:
"""
This function acts in a same way as np.array except for the case when dtype is not provided
and np.array return fp64 array this function returns fp32 array
"""
x = np.array(value, dtype=dtype)
if not isinstance(value, np.ndarray) and x.dtype == np.float64 and dtype != np.float64:
x = x.astype(np.float32)
return x
def mark_input_bins(node, names=('weights', 'biases'), start_port: int = 1):
"""
Preparing necessary attributes for edges at input ports starting from start_port.
@@ -200,9 +211,9 @@ def mark_input_bins(node, names=('weights', 'biases'), start_port: int = 1):
def assign_dims_to_weights(node, spatial, input_channel, output_channel=None, dims_number=None):
if spatial is not None:
node['spatial_dims'] = np.array(spatial, dtype=np.int64)
node['input_channel_dim'] = np.array(input_channel, dtype=np.int64)
node['output_channel_dim'] = np.array(output_channel, dtype=np.int64)
node['spatial_dims'] = int64_array(spatial)
node['input_channel_dim'] = int64_array(input_channel)
node['output_channel_dim'] = int64_array(output_channel)
if 'dim_attrs' in node and 'input_channel_dim' not in node['dim_attrs']:
node['dim_attrs'].append('input_channel_dim')
node['dims_number'] = dims_number

View File

@@ -5,6 +5,7 @@ import logging as log
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.middle.passes.convert_data_type import SUPPORTED_DATA_TYPES
@@ -46,14 +47,14 @@ class FreezePlaceholderValue(FrontReplacementSubgraph):
string_value = graph.graph['freeze_placeholder'][name]
try:
if data_type != np.bool:
value = np.array(string_value, dtype=data_type)
value = mo_array(string_value, dtype=data_type)
elif data_type == np.bool and graph.graph['fw'] == 'tf':
from openvino.tools.mo.front.tf.common import tf_data_type_cast
if isinstance(string_value, list):
casted_list = list()
for v in np.array(string_value):
for v in mo_array(string_value):
casted_list.append(tf_data_type_cast[ph.data_type](v))
value = np.array(string_value, dtype=data_type)
value = mo_array(string_value, dtype=data_type)
else:
value = tf_data_type_cast[ph.data_type](string_value)
else:

View File

@@ -6,6 +6,7 @@ import numpy as np
from openvino.tools.mo.ops.gather import Gather
from openvino.tools.mo.ops.interpolate import Interpolate
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
from openvino.tools.mo.graph.graph import Graph, Node
@@ -150,7 +151,7 @@ class InterpolateWithConcat(FrontReplacementPattern):
shape = Shape(graph, {'name': src.node.soft_get('name', src.node.id) + '/Shape'}).create_node()
shape.in_port(0).connect(src)
gather = create_op_with_const_inputs(graph, Gather,
{1: np.array(interp_axes, dtype=np.int32), 2: int64_array(0)},
{1: mo_array(interp_axes, dtype=np.int32), 2: int64_array(0)},
{'name': shape.name + '/Gathered'}, input_node=shape)
interpolate.in_port(1).get_connection().set_source(gather.out_port(0))

View File

@@ -20,7 +20,7 @@ class AddShiftFrontExtractor(FrontExtractorOp):
biases = read_binary_vector(pb)
bias_term = True
mapping_rule = {'bias_term': bias_term}
embed_input(mapping_rule, 1, 'weights', np.ones(biases.shape))
embed_input(mapping_rule, 1, 'weights', np.ones(biases.shape, dtype=np.float32))
embed_input(mapping_rule, 2, 'biases', biases)
ScaleShiftOp.update_node_stat(node, mapping_rule)
return cls.enabled

View File

@@ -1,10 +1,9 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import embed_input
from openvino.tools.mo.front.common.extractors.utils import layout_attrs
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_token_value, collect_until_whitespace, find_next_tag
from openvino.tools.mo.front.kaldi.utils import read_learning_info, read_binary_matrix, read_binary_vector
@@ -59,12 +58,12 @@ class ConvolutionalComponentFrontExtractor(FrontExtractorOp):
'output': output,
'patch_stride': patch_stride,
'bias_term': None,
'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
'stride': np.array([1, 1, 1, stride], dtype=np.int64),
'kernel_spatial': np.array([1, kernel], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
'dilation': int64_array([1, 1, 1, 1]),
'kernel': int64_array([1, 1, 1, kernel]),
'stride': int64_array([1, 1, 1, stride]),
'kernel_spatial': int64_array([1, kernel]),
'input_feature_channel': 1,
'output_feature_channel': 0,
'kernel_spatial_idx': [2, 3],

View File

@@ -5,6 +5,7 @@ import numpy as np
from openvino.tools.mo.front.caffe.extractors.utils import embed_input
from openvino.tools.mo.front.common.extractors.utils import layout_attrs
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_token_value, collect_until_whitespace
from openvino.tools.mo.front.kaldi.utils import read_learning_info, read_binary_matrix, read_binary_vector
@@ -53,12 +54,12 @@ class ConvolutionalComponentFrontExtractor(FrontExtractorOp):
'output': output,
'patch_stride': patch_stride,
'bias_term': None,
'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
'stride': np.array([1, 1, 1, stride], dtype=np.int64),
'kernel_spatial': np.array([1, kernel], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
'dilation': int64_array([1, 1, 1, 1]),
'kernel': int64_array([1, 1, 1, kernel]),
'stride': int64_array([1, 1, 1, stride]),
'kernel_spatial': int64_array([1, kernel]),
'input_feature_channel': 1,
'output_feature_channel': 0,
'kernel_spatial_idx': [2, 3],

View File

@@ -6,6 +6,7 @@ import numpy as np
from openvino.tools.mo.ops.gather import Gather
from openvino.tools.mo.ops.transpose import Transpose
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_integer32_token, read_blob
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
@@ -25,7 +26,7 @@ class CopyFrontExtractor(FrontReplacementOp):
node_name = node.soft_get('name', node.id)
const_attrs = {
'name': node_name + '/indexes',
'value': np.array(weights),
'value': mo_array(weights),
'shape': [weights_size],
'data_type': np.int32
}

View File

@@ -4,6 +4,7 @@
import numpy as np
from openvino.tools.mo.front.common.extractors.utils import layout_attrs
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_token_value, collect_until_token, \
read_binary_integer32_token, find_next_tag, read_placeholder
@@ -35,12 +36,12 @@ class MaxPoolingComponentFrontExtractor(FrontExtractorOp):
raise Error('Can not extract parameters for {}'.format(node))
mapping_rule = {
'window': np.array([1, 1, 1, kernel], dtype=np.int64),
'stride': np.array([1, 1, 1, stride], dtype=np.int64),
'window': int64_array([1, 1, 1, kernel]),
'stride': int64_array([1, 1, 1, stride]),
'pool_stride': pool_stride,
'pool_step': pool_step,
'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
'pool_method': 'max',
}
mapping_rule.update(layout_attrs())

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_bool_token, read_binary_integer32_token, collect_until_token, \
read_binary_float_token
@@ -48,7 +49,7 @@ class TdnnComponentFrontExtractor(FrontExtractorOp):
collect_until_token(pb, b'<RankInOut>')
rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb)
biases = np.array(bias_params) if len(bias_params) != 0 else None
biases = mo_array(bias_params) if len(bias_params) != 0 else None
attrs = {
'weights': np.reshape(weights, weights_shape),
'biases': biases,

View File

@@ -10,6 +10,7 @@ import numpy as np
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.ops.split import AttributedVariadicSplit
from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import add_outputs_identity
from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, find_next_component, get_name_from_path, \
find_end_of_component, end_of_nnet_tag, read_binary_integer32_token, get_parameters, read_token_value, \
@@ -157,7 +158,7 @@ def load_kalid_nnet1_model(graph, file_descr, name):
prev_node = Node(graph, prev_layer_id)
if prev_node.op == 'Parameter':
prev_node['shape'] = np.array([1, layer_i], dtype=np.int64)
prev_node['shape'] = int64_array([1, layer_i])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
@@ -190,7 +191,7 @@ def load_kalid_nnet2_model(graph, file_descr, nnet_name):
if prev_node.op == 'Parameter':
parameters = Node(graph, layer_id).parameters
input_dim = read_token_value(parameters, b'<InputDim>')
prev_node['shape'] = np.array([1, input_dim], dtype=np.int64)
prev_node['shape'] = int64_array([1, input_dim])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
@@ -323,7 +324,7 @@ def read_node(file_descr, graph, component_layer_map, layer_node_map):
if tokens[0] == b'input-node':
in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
in_name = str(in_name).strip('b').replace('\'', "")
in_shape = np.array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=np.int)
in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=np.int)
if in_name not in layer_node_map:
graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape)
@@ -399,12 +400,12 @@ def read_node(file_descr, graph, component_layer_map, layer_node_map):
if layer_name in layer_node_map:
node_name = layer_node_map[layer_name]
node = Node(graph, node_name)
node['parameters'] = {'offset': np.array([offset]), 'dim': np.array([dim]), 'axis': np.array([1])}
node['parameters'] = {'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])}
node['op'] = 'Crop'
else:
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters={'offset': np.array([offset]), 'dim': np.array([dim]), 'axis': np.array([1])},
parameters={'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])},
op='Crop',
kind='op')
layer_node_map[layer_name] = node_name

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array
from openvino.tools.mo.middle.MakeKaldiConstReshapable import create_const_with_batch_from_input
from openvino.tools.mo.ops.MatMul import FullyConnected
from openvino.tools.mo.ops.activation_ops import Tanh, Sigmoid
@@ -188,8 +189,8 @@ class ReplaceLSTMNodePattern(FrontReplacementOp):
join_forget_remember_sum.in_port(1).connect(join_remember_candidates_mul.out_port(0))
# (7)Eltwise(sum) -> Clamp
join_forget_clamp = create_op_with_const_inputs(graph, Clamp, {1: np.array(-node.clip_value, dtype=np.float32),
2: np.array(node.clip_value, dtype=np.float32)},
join_forget_clamp = create_op_with_const_inputs(graph, Clamp, {1: float32_array(-node.clip_value),
2: float32_array(node.clip_value)},
{'name': 'join_forget_clamp'},
join_forget_remember_sum)
#

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.convolution import Convolution
@@ -24,21 +25,21 @@ class ConvFrontExtractor(FrontExtractorOp):
output = attr.int("num_filter", None)
bias_term = not attr.bool("no_bias", False)
final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None
final_dilations = int64_array([1, 1, *[d for d in dilate]]) if dilate is not None else None
node_attrs = {
'op': __class__.op,
'bias_addable': True,
'bias_term': bias_term,
'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]]),
'pad_spatial_shape': int64_array([[pad, pad] for pad in padding]),
'dilation': final_dilations,
'output_spatial_shape': None,
'output_shape': None,
'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
'stride': int64_array([1, 1, *[s for s in stride]]),
'group': group,
'output': output,
'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),
'kernel_spatial': int64_array([k for k in kernel]),
'input_feature_channel': 1,
'output_feature_channel': 0,
@@ -46,8 +47,8 @@ class ConvFrontExtractor(FrontExtractorOp):
'reshape_kernel': True,
'spatial_dims': None,
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW',
}
@@ -65,9 +66,9 @@ class DeconvFrontExtractor(FrontExtractorOp):
padding = np.add.reduce(node.pad, axis=1)
padding[node.spatial_dims] = node.stride[node.spatial_dims] * (input_shape[node.spatial_dims] - 1) + 1 + \
(kernel_shape[node.spatial_dims] - 1) * node.dilation[node.spatial_dims]
padding[node.spatial_dims] = padding[node.spatial_dims] - node.output_spatial_shape;
padding[node.spatial_dims] = padding[node.spatial_dims] - node.output_spatial_shape
padding[node.spatial_dims] = (padding[node.spatial_dims] + 1) / 2
return np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding[2:]]], dtype=np.int64)
return int64_array([[0, 0], [0, 0], *[[pad, pad] for pad in padding[2:]]])
@classmethod
def extract(cls, node):
@@ -82,39 +83,39 @@ class DeconvFrontExtractor(FrontExtractorOp):
bias_term = not attr.bool("no_bias", True)
target_shape = attr.tuple("target_shape", int, None)
if target_shape:
target_shape = np.array(target_shape, dtype=np.int64)
target_shape = int64_array(target_shape)
final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None
final_dilations = int64_array([1, 1, *[d for d in dilate]]) if dilate is not None else None
node_attrs = {
'op': __class__.op,
'type': 'Deconvolution',
'bias_addable': True,
'bias_term': bias_term,
'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]]),
'pad_spatial_shape': int64_array([[pad, pad] for pad in padding]),
'dilation': final_dilations,
'output_spatial_shape': target_shape,
'original_output_spatial_shape': target_shape,
'output_shape': None,
'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
'stride': int64_array([1, 1, *[s for s in stride]]),
'group': group,
'output': output,
'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),
'kernel_spatial': int64_array([k for k in kernel]),
'input_feature_channel': 1,
'output_feature_channel': 0,
'kernel_spatial_idx': None,
'reshape_kernel': True,
'spatial_dims': None,
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW',
'get_pad': DeconvFrontExtractor.get_pad,
}
output_padding = attr.tuple("adj", int, None)
if target_shape is None and output_padding:
node_attrs["output_padding"] = np.array([0, 0, *[s for s in output_padding]], dtype=np.int64)
node_attrs["output_padding"] = int64_array([0, 0, *[s for s in output_padding]])
# update the attributes of the node
Convolution.update_node_stat(node, node_attrs)

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.extractor import MXNetCustomFrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
@@ -26,8 +25,8 @@ class RPNProposalMXNetFrontExtractor(MXNetCustomFrontExtractorOp):
'feat_stride': feat_stride,
'base_size': 0,
'min_size': min_size,
'ratio': np.array(ratio),
'scale': np.array(scale),
'ratio': mo_array(ratio),
'scale': mo_array(scale),
'pre_nms_topn': pre_nms_topn,
'post_nms_topn': post_nms_topn,
'nms_thresh': nms_thresh,

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.deformable_convolution import DeformableConvolution
@@ -25,22 +26,22 @@ class DeformableConvolutionExtractor(FrontExtractorOp):
output = attr.int("num_filter", None)
bias_term = attr.str("no_bias", 'False') == 'False'
final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None
final_dilations = int64_array([1, 1, *[d for d in dilate]]) if dilate is not None else None
node_attrs = {
'op': __class__.op,
'bias_addable': True,
'bias_term': bias_term,
'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]]),
'pad_spatial_shape': int64_array([[pad, pad] for pad in padding]),
'dilation': final_dilations,
'output_spatial_shape': None,
'output_shape': None,
'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
'stride': int64_array([1, 1, *[s for s in stride]]),
'group': num_group,
'deformable_group': num_deformable_group,
'output': output,
'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),
'kernel_spatial': int64_array([k for k in kernel]),
'input_feature_channel': 1,
'output_feature_channel': 0,
@@ -49,8 +50,8 @@ class DeformableConvolutionExtractor(FrontExtractorOp):
'weights_index': 2,
'spatial_dims': None,
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW',
}

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.elementwise import Mul, Sub, Add, Maximum, Minimum, Div, Greater, GreaterEqual, Equal, Less, \
LessEqual, Pow, NotEqual, LogicalAnd, LogicalOr, Round
from openvino.tools.mo.front.extractor import FrontExtractorOp
@@ -259,7 +260,7 @@ class PlusScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 0.0)], dtype=np.float32)
node['scalar'] = mo_array([attrs.float('scalar', 0.0)], dtype=np.float32)
return cls.enabled
@@ -270,7 +271,7 @@ class MinusScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 0.0)])
node['scalar'] = mo_array([attrs.float('scalar', 0.0)])
return cls.enabled
@@ -281,7 +282,7 @@ class MulScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 1.0)], dtype=np.float32)
node['scalar'] = mo_array([attrs.float('scalar', 1.0)], dtype=np.float32)
return cls.enabled
@@ -303,7 +304,7 @@ class GreaterScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 1.0)])
node['scalar'] = mo_array([attrs.float('scalar', 1.0)])
return cls.enabled
@@ -314,7 +315,7 @@ class GreaterEqualScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 1.0)])
node['scalar'] = mo_array([attrs.float('scalar', 1.0)])
return cls.enabled
@@ -325,7 +326,7 @@ class EqualScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 1.0)])
node['scalar'] = mo_array([attrs.float('scalar', 1.0)])
return cls.enabled
@@ -336,7 +337,7 @@ class NotEqualScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 1.0)])
node['scalar'] = mo_array([attrs.float('scalar', 1.0)])
return cls.enabled
@@ -347,7 +348,7 @@ class LesserScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 1.0)])
node['scalar'] = mo_array([attrs.float('scalar', 1.0)])
return cls.enabled
@@ -358,7 +359,7 @@ class LesserEqualScalarFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node['scalar'] = np.array([attrs.float('scalar', 1.0)])
node['scalar'] = mo_array([attrs.float('scalar', 1.0)])
return cls.enabled

View File

@@ -2,7 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.utils.error import Error
@@ -26,7 +26,7 @@ def mxnet_slice_axis_infer(node):
node.axis = get_canonical_axis_index(in_shape, node.axis)
slice_axis = node.axis
new_shape = np.array(in_shape, dtype=np.int64)
new_shape = int64_array(in_shape)
new_shape[slice_axis] = new_shape[slice_axis] / len(node.out_nodes())
axis_size = in_shape[slice_axis]

View File

@@ -1,15 +1,13 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from typing import Dict
from openvino.tools.mo.front.mxnet.mx_reshape_to_reshape import MXReshapeToReshape
from openvino.tools.mo.front.mxnet.ssd_detection_output_replacer import SsdPatternDetectionOutputReplacer
from openvino.tools.mo.ops.elementwise import Div, Add, Sub
from openvino.tools.mo.ops.split import Split
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph, Node
@@ -33,7 +31,7 @@ def calculate_prior_box_value(value: Node, value_to_div: Port, value_to_add: Por
graph = value.graph
dtype = data_type_str_to_np(graph.graph['cmd_params'].data_type)
_min = Sub(graph, dict(name=value.name + '/Sub')).create_node()
div = create_op_node_with_second_input(graph, Div, np.array([2], dtype=dtype), op_attrs=dict(name=value.name + '/Div'))
div = create_op_node_with_second_input(graph, Div, mo_array([2], dtype=dtype), op_attrs=dict(name=value.name + '/Div'))
div.in_port(0).connect(value_to_div)
_min.in_port(0).connect(value_to_add)
_min.in_port(1).connect(div.out_port(0))

View File

@@ -8,6 +8,7 @@ import os
import mxnet as mx
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import add_outputs_identity
from openvino.tools.mo.front.mxnet.extractor import common_mxnet_fields
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_node_edges, load_params, init_rnn_states, create_mxnet_edge
@@ -100,11 +101,11 @@ def symbol2nx(graph, model_nodes, model_params, input_names: str = ''):
fw_name_map = {}
for i, node in enumerate(model_nodes):
if node['name'] in model_params._arg_params and node['name'] not in input_names:
node['value'] = np.array(model_params._arg_params[node['name']].asnumpy(), dtype=np.float32)
node['value'] = mo_array(model_params._arg_params[node['name']].asnumpy(), dtype=np.float32)
elif node['name'] in model_params._aux_params and node['name'] not in input_names:
node['value'] = np.array(model_params._aux_params[node['name']].asnumpy(), dtype=np.float32)
node['value'] = mo_array(model_params._aux_params[node['name']].asnumpy(), dtype=np.float32)
elif node['name'] in names_rnn_states:
node['value'] = np.zeros(rnn_states[node['name']])
node['value'] = np.zeros(rnn_states[node['name']], dtype=np.float32)
node_name = graph.unique_id(node['name'])
graph.add_node(node_name, **symbol_attrs(node))
if hasattr(graph, 'op_names_statistic') and 'op' in node:

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.deformable_convolution import DeformableConvolution
@@ -25,22 +26,22 @@ class ModulatedDeformableConvolutionExtractor(FrontExtractorOp):
output = attr.int("num_filter", None)
bias_term = attr.str("no_bias", 'False') == 'False'
final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None
final_dilations = int64_array([1, 1, *[d for d in dilate]]) if dilate is not None else None
node_attrs = {
'op': __class__.op,
'bias_addable': True,
'bias_term': bias_term,
'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
'pad': int64_array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]]),
'pad_spatial_shape': int64_array([[pad, pad] for pad in padding]),
'dilation': final_dilations,
'output_spatial_shape': None,
'output_shape': None,
'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
'stride': int64_array([1, 1, *[s for s in stride]]),
'group': num_group,
'deformable_group': num_deformable_group,
'output': output,
'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),
'kernel_spatial': int64_array([k for k in kernel]),
'bilinear_interpolation_pad': True,
'input_feature_channel': 1,
@@ -51,8 +52,8 @@ class ModulatedDeformableConvolutionExtractor(FrontExtractorOp):
'in_ports_count': 4,
'spatial_dims': None,
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW',
}

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.pad import AttributedPad
@@ -15,7 +14,7 @@ class PadFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
pads = np.array(list(attrs.tuple('pad_width', int, None)))
pads = mo_array(list(attrs.tuple('pad_width', int, None)))
pads = pads.reshape([-1, 2])
value = attrs.float('constant_value', 0.0)

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.pooling import Pooling
@@ -23,16 +24,16 @@ class PoolingFrontExtractor(FrontExtractorOp):
rt = 'floor'
data = {
'window': np.array([1, 1, *[k for k in kernel]], dtype=np.int64),
'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
'window': int64_array([1, 1, *[k for k in kernel]]),
'stride': int64_array([1, 1, *[s for s in stride]]),
'pad': int64_array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]]),
'pad_spatial_shape': int64_array([[pad, pad] for pad in padding]),
'pool_method': method,
'exclude_pad': False,
'output_spatial_shape': None,
'spatial_dims': None,
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW',
'rounding_type': rt,
}

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
@@ -25,9 +24,9 @@ class ProposalFrontExtractor(FrontExtractorOp):
update_attrs = {
'feat_stride': feat_stride,
'ratio': np.array(ratio),
'ratio': mo_array(ratio),
'min_size': min_size,
'scale': np.array(scale),
'scale': mo_array(scale),
'pre_nms_topn': pre_nms_topn,
'post_nms_topn': post_nms_topn,
'nms_thresh': nms_thresh,

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.slice import MXSlice
@@ -16,9 +15,9 @@ class SliceFrontExtractor(FrontExtractorOp):
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
node_attrs = {
'crop_begin': np.array(attrs.tuple("begin", int, ())),
'crop_end': np.array(attrs.tuple("end", int, ())),
'step': np.array(attrs.tuple("step", int, ())),
'crop_begin': mo_array(attrs.tuple("begin", int, ())),
'crop_end': mo_array(attrs.tuple("end", int, ())),
'step': mo_array(attrs.tuple("step", int, ())),
}
MXSlice.update_node_stat(node, node_attrs)

View File

@@ -17,11 +17,11 @@ class MXSliceToStridedSliceReplacer(FrontReplacementOp):
node = match['op']
strided_slice_node = StridedSlice(graph, dict(name=node.id + '/strided_slice_',
shrink_axis_mask=np.array(np.zeros(len(node.crop_begin), dtype=np.int64)),
new_axis_mask=np.array(np.zeros(len(node.crop_begin), dtype=np.int64)),
ellipsis_mask=np.array(np.zeros(len(node.crop_begin), dtype=np.int64)),
begin_mask=np.array(np.ones(len(node.crop_begin), dtype=np.int64)),
end_mask=np.array(np.ones(len(node.crop_end), dtype=np.int64)))).create_node()
shrink_axis_mask=np.zeros(len(node.crop_begin), dtype=np.int64),
new_axis_mask=np.zeros(len(node.crop_begin), dtype=np.int64),
ellipsis_mask=np.zeros(len(node.crop_begin), dtype=np.int64),
begin_mask=np.ones(len(node.crop_begin), dtype=np.int64),
end_mask=np.ones(len(node.crop_end), dtype=np.int64))).create_node()
node.in_port(0).get_connection().set_destination(strided_slice_node.in_port(0))
node.out_port(0).get_connection().set_source(strided_slice_node.out_port(0))

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
from openvino.tools.mo.graph.graph import Graph
@@ -26,7 +25,7 @@ class SoftmaxFrontReplacementSubgraph(FrontReplacementSubgraph):
in_node = node.in_node()
out_nodes = [node for node in node.out_nodes().values()]
graph.remove_edge(node.in_node().id, node.id)
temperature = np.array([1.0 / node.temperature])
temperature = mo_array([1.0 / node.temperature])
scalar_value_op = Const(graph, dict(value=temperature, shape=temperature.shape,
symbol_dict={'name': node.id + '/const'}))
mul_op = Mul(graph, dict(name=node.id + '/mul_', symbol_dict={'name': node.id + '/mul_'}))

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.mxnet.eltwise_scalar_replacers import MulScalarFrontReplacer
from openvino.tools.mo.front.mxnet.ssd_detection_output_replacer import SsdPatternDetectionOutputReplacer
from openvino.tools.mo.front.split_normalizer import AttributedSplitToSplit
@@ -76,7 +75,7 @@ class SsdPatternAnchorReshape(FrontReplacementSubgraph):
if v in variants_dict.keys():
variants_dict[v] = Node(graph, k).in_nodes()[1].value[0]
variants = np.array([variants_dict['mul_scalar1x'], variants_dict['mul_scalar1y'],
variants = mo_array([variants_dict['mul_scalar1x'], variants_dict['mul_scalar1y'],
variants_dict['mul_scalar2x'], variants_dict['mul_scalar2y']] * int(const.value.size / 4)).reshape(const.value.shape)
priorbox_variants = Const(graph, dict(value=variants, name=const.id + '/priorbox_variants')).create_node()
variants_slice_like = SliceLike(graph, dict(axes=slice_like.axes,

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.transpose import Transpose
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
@@ -16,5 +17,5 @@ class TransposeFrontExtractor(FrontExtractorOp):
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
order = list(attrs.tuple("axes", int, None))
Transpose.update_node_stat(node, {'order': np.array(order, dtype=np.int32)})
Transpose.update_node_stat(node, {'order': mo_array(order, dtype=np.int32)})
return cls.enabled

View File

@@ -16,6 +16,11 @@ class ZerosFrontExtractor(FrontExtractorOp):
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
shape = list(attrs.tuple('shape', int, None))
dtype = attrs.tuple('dtype', str, None)
if dtype and len(dtype) == 1:
dtype = dtype[0]
else:
dtype = np.float32
zero_shapes = []
for i, s in enumerate(shape):
if s == 0:
@@ -24,7 +29,7 @@ class ZerosFrontExtractor(FrontExtractorOp):
update_attrs = {
'shape': np.ndarray(shape),
'value': np.zeros(shape),
'value': np.zeros(shape, dtype=dtype),
'zero_shapes': zero_shapes
}

View File

@@ -6,7 +6,7 @@ import numpy as np
from openvino.tools.mo.front.pass_separator import FrontStart
from openvino.tools.mo.front.restore_ports import RestorePorts
from openvino.tools.mo.ops.loop import Loop
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
from openvino.tools.mo.graph.graph import Graph, Node
@@ -39,7 +39,7 @@ class ONNXLoopNormalize(FrontReplacementSubgraph):
# connect "execution condition" input if it is not connected with default value True
if not loop_node.is_in_port_connected(1):
loop_node.add_input_port(1, skip_if_exist=True)
Const(loop_node.graph, {'name': loop_name + '/execution_cond', 'value': np.array(True, dtype=np.bool)}).\
Const(loop_node.graph, {'name': loop_name + '/execution_cond', 'value': mo_array(True, dtype=np.bool)}).\
create_node().out_port(0).connect(loop_node.in_port(1))
# scan output need Unsqueeze over axis 0

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -14,7 +13,7 @@ class AffineFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
dst_type = lambda x: np.array(x)
dst_type = lambda x: mo_array(x)
scale = onnx_attr(node, 'alpha', 'f', default=None, dst_type=dst_type)
bias = onnx_attr(node, 'beta', 'f', default=None, dst_type=dst_type)

View File

@@ -4,6 +4,7 @@
import numpy as np
from onnx import numpy_helper
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.constant_of_shape import ConstantOfShape
@@ -15,7 +16,7 @@ class ConstantOfShapeExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
fill_value = onnx_attr(node, 'value', 't', default=np.array([0.0]), dst_type=lambda x: numpy_helper.to_array(x))
fill_value = onnx_attr(node, 'value', 't', default=mo_array([0.0]), dst_type=lambda x: numpy_helper.to_array(x))
ConstantOfShape.update_node_stat(node, {'fill_value': fill_value})
return cls.enabled

View File

@@ -18,27 +18,27 @@ class ConvFrontExtractor(FrontExtractorOp):
def extract(cls, node):
# Extract pads attribute
# In case if pads is not specified it will be set in default (1) in infer function
pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: int64_array(x))
assert pads is None or len(pads) % 2 == 0
final_pad = None
if pads is not None:
pads = pads.reshape([2, -1])
pads = np.transpose(pads)
final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)
final_pad = int64_array([[0, 0], [0, 0], *pads])
# Extract dilations attribute
# In case if dilations is not specified it will be set in default (1) in infer function
dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
final_dilations = np.array([1, 1, *dilations], dtype=np.int64) if dilations is not None else None
dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: int64_array(x))
final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None
# Extract dilations attribute
# In case if dilations is not specified it will be set in default (1) in infer function
strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
final_strides = np.array([1, 1, *strides], dtype=np.int64) if strides is not None else None
strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: int64_array(x))
final_strides = int64_array([1, 1, *strides]) if strides is not None else None
kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None)
auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)
group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: np.array(x, dtype=np.int64))
group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: int64_array(x))
attrs = {
'op': __class__.op,
@@ -46,22 +46,22 @@ class ConvFrontExtractor(FrontExtractorOp):
'bias_addable': True,
'bias_term': None,
'pad': final_pad,
'pad_spatial_shape': np.array(pads, dtype=np.int64) if pads is not None else None,
'pad_spatial_shape': int64_array(pads) if pads is not None else None,
'dilation': final_dilations,
'output_spatial_shape': None,
'output_shape': None,
'stride': final_strides,
'group': group,
'output': None,
'kernel_spatial': np.array(kernel_shape, dtype=np.int64) if kernel_shape is not None else None,
'kernel_spatial': int64_array(kernel_shape) if kernel_shape is not None else None,
'input_feature_channel': 1,
'output_feature_channel': 0,
'kernel_spatial_idx': None, # Will be calculated in infer function (np.array([2, 3]))
'spatial_dims': None, # Will be calculated in infer function
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW'
}

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_autopad
from openvino.tools.mo.ops.deformable_convolution import DeformableConvolution
@@ -16,27 +17,27 @@ class DeformableConvExtractor(FrontExtractorOp):
def extract(cls, node):
# Extract pads attribute
# In case if pads is not specified it will be set in default (1) in infer function
pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: int64_array(x))
assert pads is None or len(pads) % 2 == 0
final_pad = None
if pads is not None:
pads = pads.reshape([2, -1])
pads = np.transpose(pads)
final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)
final_pad = int64_array([[0, 0], [0, 0], *pads])
# Extract dilations attribute
# In case if dilations is not specified it will be set in default (1) in infer function
dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
final_dilations = np.array([1, 1, *dilations], dtype=np.int64) if dilations is not None else None
dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: int64_array(x))
final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None
# Extract dilations attribute
# In case if dilations is not specified it will be set in default (1) in infer function
strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
final_strides = np.array([1, 1, *strides], dtype=np.int64) if strides is not None else None
strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: int64_array(x))
final_strides = int64_array([1, 1, *strides]) if strides is not None else None
kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None)
auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)
group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: np.array(x, dtype=np.int64))
group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: int64_array(x))
deformable_groups = onnx_attr(node, 'deformable_groups', 'i', default=1)
attrs = {
@@ -45,7 +46,7 @@ class DeformableConvExtractor(FrontExtractorOp):
'bias_addable': False,
'bias_term': False,
'pad': final_pad,
'pad_spatial_shape': np.array(pads, dtype=np.int64) if pads is not None else None,
'pad_spatial_shape': int64_array(pads) if pads is not None else None,
'dilation': final_dilations,
'output_spatial_shape': None,
'output_shape': None,
@@ -54,15 +55,15 @@ class DeformableConvExtractor(FrontExtractorOp):
'deformable_group': deformable_groups,
'output': None,
'weights_index': 2,
'kernel_spatial': np.array(kernel_shape, dtype=np.int64) if kernel_shape is not None else None,
'kernel_spatial': int64_array(kernel_shape) if kernel_shape is not None else None,
'input_feature_channel': 1,
'output_feature_channel': 0,
'kernel_spatial_idx': None, # Will be calculated in infer function (np.array([2, 3]))
'spatial_dims': None, # Will be calculated in infer function
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW'
}

View File

@@ -3,8 +3,7 @@
from math import log
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.detection_output_onnx import ExperimentalDetectronDetectionOutput
@@ -23,8 +22,7 @@ class ExperimentalDetectronDetectionOutputFrontExtractor(FrontExtractorOp):
post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000),
score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05),
max_delta_log_wh=onnx_attr(node, 'max_delta_log_wh', 'f', log(1000. / 16.)),
deltas_weights=np.array(onnx_attr(node, 'deltas_weights', 'floats', [10., 10., 5., 5.]),
dtype=np.float32)
deltas_weights=float32_array(onnx_attr(node, 'deltas_weights', 'floats', [10., 10., 5., 5.]))
)
ExperimentalDetectronDetectionOutput.update_node_stat(node, attrs)
return cls.enabled

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.elementwise import Add, Sub, Mul, Div, Pow, Less, Equal, Greater, LogicalAnd, LogicalOr, LogicalXor, \
Round, GreaterEqual, LessEqual
from openvino.tools.mo.front.extractor import FrontExtractorOp
@@ -103,7 +102,7 @@ class ScaleFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node: Node):
scale = onnx_attr(node, 'scale', 'f', default=np.array(1.0), dst_type=lambda x: np.array(x))
scale = onnx_attr(node, 'scale', 'f', default=mo_array(1.0), dst_type=lambda x: mo_array(x))
AttributedPower.update_node_stat(node, {'scale': scale})
return cls.enabled

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.ops.gather import AttributedGather
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -15,7 +14,7 @@ class GatherFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = {
'axis': np.array(onnx_attr(node, 'axis', 'i', default=0), dtype=np.int64)
'axis': int64_array(onnx_attr(node, 'axis', 'i', default=0))
}
AttributedGather.update_node_stat(node, attrs)

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.ops.MatMul import GemmONNX
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -21,7 +20,7 @@ class GemmFrontExtractor(FrontExtractorOp):
'transpose_b': onnx_attr(node, 'transB', 'i', 0),
'broadcast_c': onnx_attr(node, 'broadcast', 'i', 1),
# TODO: there is no axis in onnx operators.md
'axis': np.array(onnx_attr(node, 'axis', 'i', default=0), dtype=np.int64)
'axis': int64_array(onnx_attr(node, 'axis', 'i', default=0))
}
GemmONNX.update_node_stat(node, attrs)
return cls.enabled

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.group_norm import GroupNorm
@@ -15,8 +16,8 @@ class ExperimentalDetectronGroupNorm(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = {
'eps': np.array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
'num_groups': np.array(onnx_attr(node, 'num_groups', 'i', default=1), dtype=np.int64),
'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)),
}
GroupNorm.update_node_stat(node, attrs)
return cls.enabled
@@ -29,8 +30,8 @@ class GroupNormExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = {
'eps': np.array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
'num_groups': np.array(onnx_attr(node, 'num_groups', 'i', default=1), dtype=np.int64),
'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)),
}
GroupNorm.update_node_stat(node, attrs)
return cls.enabled

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array, int64_array
from openvino.tools.mo.ops.GRU import GRU
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -15,9 +16,9 @@ class GRUFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
activation_alpha = onnx_attr(node, 'activation_alpha', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
default=None, dst_type=lambda x: float32_array(x))
activation_beta = onnx_attr(node, 'activation_beta', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
default=None, dst_type=lambda x: float32_array(x))
activations = onnx_attr(node, 'activations', 'strings', default=None,
dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
clip = onnx_attr(node, 'clip', 'f', default=None)
@@ -39,7 +40,7 @@ class GRUFrontExtractor(FrontExtractorOp):
'activations': activations,
'clip': clip,
'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(),
'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),
'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')),
'linear_before_reset': linear_before_reset,
}

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.hard_sigmoid import HardSigmoid
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -18,7 +17,7 @@ class HardSigmoidFrontExtractor(FrontReplacementOp):
alpha = onnx_attr(node, 'alpha', 'f', default=0.2)
beta = onnx_attr(node, 'beta', 'f', default=0.5)
hard_sigmoid = create_op_with_const_inputs(graph, HardSigmoid, {1: np.array(alpha), 2: np.array(beta)},
hard_sigmoid = create_op_with_const_inputs(graph, HardSigmoid, {1: mo_array(alpha), 2: mo_array(beta)},
{'name': node.name + '/HardSigmoid_'})
node.in_port(0).get_connection().set_destination(hard_sigmoid.in_port(0))

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -13,9 +14,9 @@ class ImageScalerFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
dst_type = lambda x: np.array(x)
dst_type = lambda x: mo_array(x)
scale = onnx_attr(node, 'scale', 'f', default=np.array(1.0), dst_type=dst_type)
scale = onnx_attr(node, 'scale', 'f', default=mo_array(1.0), dst_type=dst_type)
bias = onnx_attr(node, 'bias', 'floats', default=None, dst_type=dst_type)
# Expand dims for bias in case if it is not scalar

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array, int64_array
from openvino.tools.mo.ops.LSTM import LSTM
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -15,9 +14,9 @@ class LSTMFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
activation_alpha = onnx_attr(node, 'activation_alpha', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
default=None, dst_type=lambda x: float32_array(x))
activation_beta = onnx_attr(node, 'activation_beta', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
default=None, dst_type=lambda x: float32_array(x))
activations = onnx_attr(node, 'activations', 'strings', default=None,
dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
clip = onnx_attr(node, 'clip', 'f', default=None)
@@ -39,7 +38,7 @@ class LSTMFrontExtractor(FrontExtractorOp):
'activations': activations,
'clip': clip,
'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(),
'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),
'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')),
'input_forget': input_forget,
}

View File

@@ -8,7 +8,7 @@ from openvino.tools.mo.ops.Cast import Cast
from openvino.tools.mo.ops.detection_output_onnx import ExperimentalDetectronDetectionOutput
from openvino.tools.mo.ops.parameter import Parameter
from openvino.tools.mo.ops.roifeatureextractor_onnx import ExperimentalDetectronROIFeatureExtractor
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
from openvino.tools.mo.graph.graph import Graph
@@ -53,7 +53,7 @@ def insert_do(graph: Graph, replacement_descriptions: dict):
do_node = ExperimentalDetectronDetectionOutput(graph, {'name': 'DetectionOutput',
'class_agnostic_box_regression': 0,
'deltas_weights': np.array([10.0, 10.0, 5.0, 5.0]),
'deltas_weights': mo_array([10.0, 10.0, 5.0, 5.0]),
'max_delta_log_wh':
replacement_descriptions['max_delta_log_wh'],
'nms_threshold': replacement_descriptions['nms_threshold'],

View File

@@ -1,8 +1,6 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.mvn import MVNOnnx
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
@@ -17,7 +15,7 @@ class MeanVarianceNormalizationExtractor(FrontExtractorOp):
def extract(cls, node):
axes = onnx_attr(node, 'axes', 'ints',
default=int64_array([0, 2, 3]),
dst_type=lambda x: np.array(x, dtype=np.int64))
dst_type=lambda x: int64_array(x))
attrs = {
'eps': 1e-9,

View File

@@ -67,10 +67,10 @@ class GlobalMaxPoolFrontExtractor(FrontExtractorOp):
def common_onnx_pool_extractor(node):
kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
final_kernel_shape = np.array([1, 1, *[x for x in kernel_shape]], dtype=np.int64) if kernel_shape is not None else None
kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None, dst_type=lambda x: int64_array(x))
final_kernel_shape = int64_array([1, 1, *[x for x in kernel_shape]]) if kernel_shape is not None else None
pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: int64_array(x))
if kernel_shape is not None and pads is not None and kernel_shape.size * 2 != pads.size:
log.warning('Node {} has pad = {} which is ill-formed -- it should have even amount of elements.'.format(
@@ -86,12 +86,12 @@ def common_onnx_pool_extractor(node):
assert len(pads) % 2 == 0
pads = pads.reshape([2, -1])
pads = np.transpose(pads)
final_pads = np.array([[0, 0], [0, 0], *[p for p in pads]], dtype=np.int64)
final_pads = int64_array([[0, 0], [0, 0], *[p for p in pads]])
# Extract strides attribute
# In case if strides is not specified it will be set in default (1) in infer function
strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
final_strides = np.array([1, 1, *[x for x in strides]], dtype=np.int64) if strides is not None else None
strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: int64_array(x))
final_strides = int64_array([1, 1, *[x for x in strides]]) if strides is not None else None
dilation = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: int64_array(x))
final_dilation = int64_array([1, 1, *[x for x in dilation]]) if dilation is not None else None
@@ -121,7 +121,7 @@ def common_onnx_pool_extractor(node):
'window': final_kernel_shape,
'stride': final_strides,
'pad': final_pads,
'pad_spatial_shape': np.array(pads, dtype=np.int64) if pads is not None else None,
'pad_spatial_shape': int64_array(pads) if pads is not None else None,
'pool_method': method,
'exclude_pad': True if exclude_pad else False,
'global_pool': global_pooling,
@@ -130,8 +130,8 @@ def common_onnx_pool_extractor(node):
'dilation': final_dilation,
'spatial_dims': None,
'channel_dims': np.array([1], dtype=np.int64),
'batch_dims': np.array([0], dtype=np.int64),
'channel_dims': int64_array([1]),
'batch_dims': int64_array([0]),
'layout': 'NCHW',
'pooling_convention': pooling_convention

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array
from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -14,13 +13,13 @@ class PriorBoxClusteredFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
variance = onnx_attr(node, 'variance', 'floats', default=[], dst_type=lambda x: np.array(x, dtype=np.float32))
variance = onnx_attr(node, 'variance', 'floats', default=[], dst_type=lambda x: float32_array(x))
if len(variance) == 0:
variance = [0.1]
update_attrs = {
'width': onnx_attr(node, 'width', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32)),
'height': onnx_attr(node, 'height', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32)),
'width': onnx_attr(node, 'width', 'floats', dst_type=lambda x: float32_array(x)),
'height': onnx_attr(node, 'height', 'floats', dst_type=lambda x: float32_array(x)),
'flip': onnx_attr(node, 'flip', 'i', default=0),
'clip': onnx_attr(node, 'clip', 'i', default=0),
'variance': list(variance),

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array
from openvino.tools.mo.ops.priorbox import PriorBoxOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -14,14 +13,14 @@ class PriorBoxFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
variance = onnx_attr(node, 'variance', 'floats', default=[], dst_type=lambda x: np.array(x, dtype=np.float32))
variance = onnx_attr(node, 'variance', 'floats', default=[], dst_type=lambda x: float32_array(x))
if len(variance) == 0:
variance = [0.1]
update_attrs = {
'aspect_ratio': onnx_attr(node, 'aspect_ratio', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32)),
'min_size': onnx_attr(node, 'min_size', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32)),
'max_size': onnx_attr(node, 'max_size', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32)),
'aspect_ratio': onnx_attr(node, 'aspect_ratio', 'floats', dst_type=lambda x: float32_array(x)),
'min_size': onnx_attr(node, 'min_size', 'floats', dst_type=lambda x: float32_array(x)),
'max_size': onnx_attr(node, 'max_size', 'floats', dst_type=lambda x: float32_array(x)),
'flip': onnx_attr(node, 'flip', 'i', default=0),
'clip': onnx_attr(node, 'clip', 'i', default=0),
'variance': list(variance),

View File

@@ -1,8 +1,6 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.reshape import Reshape
@@ -15,7 +13,7 @@ class ReshapeFrontExtractor(FrontExtractorOp):
def extract(cls, node):
dim = onnx_attr(node, 'shape', 'ints', None)
if dim is not None:
dim = np.array(dim, dtype=np.int64)
dim = int64_array(dim)
Reshape.update_node_stat(node, {'dim': dim})
else:
Reshape.update_node_stat(node)

View File

@@ -3,6 +3,7 @@
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array, int64_array
from openvino.tools.mo.ops.RNN import RNN
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -17,9 +18,9 @@ class RNNFrontExtractor(FrontExtractorOp):
direction = onnx_attr(node, 'direction', 's', b'forward').decode().lower()
activation_alpha = onnx_attr(node, 'activation_alpha', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
default=None, dst_type=lambda x: float32_array(x))
activation_beta = onnx_attr(node, 'activation_beta', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
default=None, dst_type=lambda x: float32_array(x))
activations = onnx_attr(node, 'activations', 'strings',
default=['tanh', 'tanh'] if direction == 'bidirectional' else ['tanh'],
dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
@@ -45,7 +46,7 @@ class RNNFrontExtractor(FrontExtractorOp):
'activations': activations,
'clip': clip,
'direction': direction,
'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),
'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')),
}
RNN.update_node_stat(node, attrs)

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.ops.roifeatureextractor_onnx import ExperimentalDetectronROIFeatureExtractor
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
@@ -20,8 +19,7 @@ class ExperimentalDetectronROIFeatureExtractorFrontExtractor(FrontExtractorOp):
num_classes=onnx_attr(node, 'num_classes', 'i', 81),
post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000),
score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05),
pyramid_scales=np.array(onnx_attr(node, 'pyramid_scales', 'ints', [4, 8, 16, 32, 64]),
dtype=np.int64),
pyramid_scales=int64_array(onnx_attr(node, 'pyramid_scales', 'ints', [4, 8, 16, 32, 64])),
)
ExperimentalDetectronROIFeatureExtractor.update_node_stat(node, attrs)

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.squeeze import Squeeze
@@ -14,7 +13,7 @@ class SqueezeFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
axis = np.array(onnx_attr(node, 'axes', 'ints', default=[]), dtype=np.int64)
axis = int64_array(onnx_attr(node, 'axes', 'ints', default=[]))
attrs = {
'squeeze_dims': axis if len(axis) != 0 else None

View File

@@ -1,8 +1,7 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.expand_dims import ExpandDims
@@ -17,7 +16,7 @@ class UnsqueezeFrontExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
axis = np.array(onnx_attr(node, 'axes', 'ints', default=[]), dtype=np.int64)
axis = int64_array(onnx_attr(node, 'axes', 'ints', default=[]))
ExpandDims.update_node_stat(node, {'expand_axis': axis})
return cls.enabled

View File

@@ -3,8 +3,7 @@
import math
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import float32_array
from openvino.tools.mo.ops.ONNXResize10 import ONNXResize10
from openvino.tools.mo.ops.upsample import UpsampleOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
@@ -24,7 +23,7 @@ class UpsampleFrontExtractor(FrontExtractorOp):
ONNXResize10.update_node_stat(node, {'mode': mode})
else:
mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32))
scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: float32_array(x))
width_scale = onnx_attr(node, 'width_scale', 'f')
height_scale = onnx_attr(node, 'height_scale', 'f')

Some files were not shown because too many files have changed in this diff Show More