[TF FE] Add loaders: EuclideanNorm, Erf, Bucketize, BroadcastTo, BroadcastArgs, Einsum, Mish (#12882)

* [TF FE] Add loaders: EuclideanNorm, Erf, Bucketize, BroadcastTo, BroadcastArgs

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix a name for translate_broadcast_args_op

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Add loaders for Einsum and Mish

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
This commit is contained in:
Roman Kazantsev 2022-09-05 11:51:20 +03:00 committed by GitHub
parent 27c1c6a3d1
commit 666d53e909
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 154 additions and 0 deletions

View File

@ -0,0 +1,49 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op_table.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ov::opset8;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
// Translates TF BroadcastArgs: given two 1-D shape tensors s0 and s1,
// returns the shape that results from broadcasting them against each other.
OutputVector translate_broadcast_args_op(const NodeContext& node) {
    default_op_checks(node, 2, {"BroadcastArgs"});
    auto s0 = node.get_input(0);
    auto s1 = node.get_input(1);

    // compute a number of shape elements to prepend for broadcasting;
    // ShapeOf of a 1-D shape tensor is a 1-D tensor of one element, which
    // matches the rank-1 requirement for pads_begin/pads_end of Pad
    // (no Squeeze: scalar pad counts would violate Pad's shape requirements)
    auto size0 = make_shared<ShapeOf>(s0);
    auto size1 = make_shared<ShapeOf>(s1);
    auto max_size = make_shared<Maximum>(size0, size1);
    auto diff1 = make_shared<Subtract>(max_size, size0);
    auto diff2 = make_shared<Subtract>(max_size, size1);

    // pad the shorter shape with minus ones at the BEGINNING:
    // broadcasting aligns dimensions from the right, so the missing leading
    // dimensions of the shorter shape are the ones to fill in; minus one is
    // used (instead of one) so that the element-wise Maximum below always
    // selects the dimension from the other shape, which also covers
    // dynamic dimensions
    auto padded_s0 =
        make_shared<Pad>(s0,
                         diff1,
                         make_shared<Constant>(diff1->get_element_type(), Shape{1}, std::vector<int64_t>{0}),
                         make_shared<Constant>(s0.get_element_type(), Shape{}, std::vector<int64_t>{-1}),
                         ov::op::PadMode::CONSTANT);
    auto padded_s1 =
        make_shared<Pad>(s1,
                         diff2,
                         make_shared<Constant>(diff2->get_element_type(), Shape{1}, std::vector<int64_t>{0}),
                         make_shared<Constant>(s1.get_element_type(), Shape{}, std::vector<int64_t>{-1}),
                         ov::op::PadMode::CONSTANT);

    // element-wise maximum of the right-aligned shapes is the broadcasted shape
    auto broadcasted_shape = make_shared<Maximum>(padded_s0, padded_s1);
    set_node_name(node.get_name(), broadcasted_shape);
    return {broadcasted_shape};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op_table.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ov::opset8;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
// Translates TF BroadcastTo(input, shape): replicates the input tensor
// so that it takes the requested target shape.
OutputVector translate_broadcast_to_op(const NodeContext& node) {
    default_op_checks(node, 2, {"BroadcastTo"});
    auto data = node.get_input(0);
    auto target_shape = node.get_input(1);

    // OpenVINO Broadcast with its default (NUMPY) mode performs the replication
    auto result = make_shared<Broadcast>(data, target_shape);
    set_node_name(node.get_name(), result);
    return {result};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op_table.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ov::opset8;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
// Translates TF Bucketize: maps each input element to the index of the
// bucket it falls into, given a sorted list of bucket boundaries.
OutputVector translate_bucketize_op(const NodeContext& node) {
    default_op_checks(node, 1, {"Bucketize"});
    auto input = node.get_input(0);

    // bucket boundaries come as a float-list attribute of the TF node
    auto boundaries = node.get_attribute<std::vector<float>>("boundaries");
    auto boundaries_const = make_shared<Constant>(ov::element::f32, Shape{boundaries.size()}, boundaries);

    // i32 output indices; the final `false` disables the inclusive right bound
    // so the boundary value itself belongs to the next bucket, as in TF
    auto bucketize = make_shared<Bucketize>(input, boundaries_const, ov::element::i32, false);
    set_node_name(node.get_name(), bucketize);
    return {bucketize};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op_table.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ov::opset8;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
// Translates TF Einsum: a variadic operation whose operands are combined
// according to the `equation` string attribute.
// NOTE: default_op_checks is not used here because Einsum takes a variable
// number of inputs; only the op type is validated.
OutputVector translate_einsum_op(const NodeContext& node) {
    TENSORFLOW_OP_VALIDATION(node,
                             node.get_op_type() == "Einsum",
                             "Internal error: incorrect usage of translate_einsum_op.");
    auto equation = node.get_attribute<std::string>("equation");

    // gather all operands of the node
    auto input_size = node.get_input_size();
    OutputVector operands;
    operands.reserve(input_size);
    for (size_t idx = 0; idx < input_size; ++idx) {
        operands.push_back(node.get_input(idx));
    }

    auto einsum = make_shared<Einsum>(operands, equation);
    set_node_name(node.get_name(), einsum);
    return {einsum};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -45,6 +45,7 @@ template OutputVector translate_direct_reduce_op<ReduceMean>(const NodeContext&
template OutputVector translate_direct_reduce_op<ReduceMin>(const NodeContext& node);
template OutputVector translate_direct_reduce_op<ReduceProd>(const NodeContext& node);
template OutputVector translate_direct_reduce_op<ReduceSum>(const NodeContext& node);
template OutputVector translate_direct_reduce_op<ReduceL2>(const NodeContext& node);
} // namespace op
} // namespace tensorflow
} // namespace frontend

View File

@ -38,10 +38,12 @@ template OutputVector translate_unary_op<Atanh>(const NodeContext& node);
template OutputVector translate_unary_op<Ceiling>(const NodeContext& node);
template OutputVector translate_unary_op<Cos>(const NodeContext& node);
template OutputVector translate_unary_op<Cosh>(const NodeContext& node);
template OutputVector translate_unary_op<Erf>(const NodeContext& node);
template OutputVector translate_unary_op<Exp>(const NodeContext& node);
template OutputVector translate_unary_op<Floor>(const NodeContext& node);
template OutputVector translate_unary_op<Log>(const NodeContext& node);
template OutputVector translate_unary_op<LogicalNot>(const NodeContext& node);
template OutputVector translate_unary_op<Mish>(const NodeContext& node);
template OutputVector translate_unary_op<Negative>(const NodeContext& node);
template OutputVector translate_unary_op<Relu>(const NodeContext& node);
template OutputVector translate_unary_op<Sigmoid>(const NodeContext& node);

View File

@ -28,6 +28,9 @@ OP_CONVERTER(translate_avg_pool_op);
OP_CONVERTER(translate_batch_mat_mul_op);
OP_CONVERTER(translate_batch_nd_and_space_nd_op);
OP_CONVERTER(translate_bias_add_op);
OP_CONVERTER(translate_broadcast_args_op);
OP_CONVERTER(translate_broadcast_to_op);
OP_CONVERTER(translate_bucketize_op);
OP_CONVERTER(translate_cast_op);
OP_CONVERTER(translate_concat_op);
OP_CONVERTER(translate_const_op);
@ -39,6 +42,7 @@ OP_CONVERTER(translate_cumsum_op);
OP_CONVERTER(translate_crop_and_resize_op);
OP_CONVERTER(translate_depth_to_space_op);
OP_CONVERTER(translate_depthwise_conv_2d_native_op);
OP_CONVERTER(translate_einsum_op);
OP_CONVERTER(translate_elu_op);
OP_CONVERTER(translate_expand_dims_op);
OP_CONVERTER(translate_extract_image_patches_op);
@ -115,10 +119,12 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"Ceil", translate_unary_op<opset8::Ceiling>},
{"Cos", translate_unary_op<opset8::Cos>},
{"Cosh", translate_unary_op<opset8::Cosh>},
{"Erf", translate_unary_op<opset8::Erf>},
{"Exp", translate_unary_op<opset8::Exp>},
{"Floor", translate_unary_op<opset8::Floor>},
{"Log", translate_unary_op<opset8::Log>},
{"LogicalNot", translate_unary_op<opset8::LogicalNot>},
{"Mish", translate_unary_op<opset8::Mish>},
{"Neg", translate_unary_op<opset8::Negative>},
{"Relu", translate_unary_op<opset8::Relu>},
{"Sigmoid", translate_unary_op<opset8::Sigmoid>},
@ -155,6 +161,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
// note: ReduceOp translator declaration for each op must to be added in reduce.cpp file
{"Any", translate_direct_reduce_op<opset8::ReduceLogicalOr>},
{"All", translate_direct_reduce_op<opset8::ReduceLogicalAnd>},
{"EuclideanNorm", translate_direct_reduce_op<opset8::ReduceL2>},
{"Max", translate_direct_reduce_op<opset8::ReduceMax>},
{"Mean", translate_direct_reduce_op<opset8::ReduceMean>},
{"Min", translate_direct_reduce_op<opset8::ReduceMin>},
@ -170,6 +177,9 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"BatchMatMul", translate_batch_mat_mul_op},
{"BatchMatMulV2", translate_batch_mat_mul_op},
{"BatchToSpaceND", translate_batch_nd_and_space_nd_op},
{"BroadcastArgs", translate_broadcast_args_op},
{"BroadcastTo", translate_broadcast_to_op},
{"Bucketize", translate_bucketize_op},
{"BiasAdd", translate_bias_add_op},
{"Cast", translate_cast_op},
{"Concat", translate_concat_op},
@ -183,6 +193,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"Cumsum", translate_cumsum_op},
{"DepthToSpace", translate_depth_to_space_op},
{"DepthwiseConv2dNative", translate_depthwise_conv_2d_native_op},
{"Einsum", translate_einsum_op},
{"Elu", translate_elu_op},
{"ExpandDims", translate_expand_dims_op},
{"ExtractImagePatches", translate_extract_image_patches_op},