diff --git a/src/frontends/tensorflow/src/op/broadcast_args.cpp b/src/frontends/tensorflow/src/op/broadcast_args.cpp
new file mode 100644
index 00000000000..f0321dcdc02
--- /dev/null
+++ b/src/frontends/tensorflow/src/op/broadcast_args.cpp
@@ -0,0 +1,49 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "op_table.hpp"
+#include "openvino/opsets/opset8.hpp"
+
+using namespace std;
+using namespace ov::opset8;
+
+namespace ov {
+namespace frontend {
+namespace tensorflow {
+namespace op {
+OutputVector translate_broadcast_args_op(const NodeContext& node) {
+    default_op_checks(node, 2, {"BroadcastArgs"});
+    auto s0 = node.get_input(0);
+    auto s1 = node.get_input(1);
+
+    // compute a number of shape elements to append for broadcasting
+    auto size0 = make_shared<Squeeze>(make_shared<ShapeOf>(s0));
+    auto size1 = make_shared<Squeeze>(make_shared<ShapeOf>(s1));
+    auto max_size = make_shared<Maximum>(size0, size1);
+    auto diff1 = make_shared<Subtract>(max_size, size0);
+    auto diff2 = make_shared<Subtract>(max_size, size1);
+
+    // pad the shortest shape value with minus ones
+    // to take dynamic shapes into account
+    auto padded_s0 =
+        make_shared<Pad>(s0,
+                         make_shared<Constant>(diff1->get_element_type(), Shape{1}, std::vector<int64_t>{0}),
+                         diff1,
+                         make_shared<Constant>(s0.get_element_type(), Shape{}, std::vector<int64_t>{-1}),
+                         ov::op::PadMode::CONSTANT);
+    auto padded_s1 =
+        make_shared<Pad>(s1,
+                         make_shared<Constant>(diff2->get_element_type(), Shape{1}, std::vector<int64_t>{0}),
+                         diff2,
+                         make_shared<Constant>(s1.get_element_type(), Shape{}, std::vector<int64_t>{-1}),
+                         ov::op::PadMode::CONSTANT);
+
+    auto broadcasted_shape = make_shared<Maximum>(padded_s0, padded_s1);
+    set_node_name(node.get_name(), broadcasted_shape);
+    return {broadcasted_shape};
+}
+}  // namespace op
+}  // namespace tensorflow
+}  // namespace frontend
+}  // namespace ov
\ No newline at end of file
diff --git a/src/frontends/tensorflow/src/op/broadcast_to.cpp b/src/frontends/tensorflow/src/op/broadcast_to.cpp
new file mode 100644
index 00000000000..3574574bcf2
--- /dev/null
+++ b/src/frontends/tensorflow/src/op/broadcast_to.cpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "op_table.hpp"
+#include "openvino/opsets/opset8.hpp"
+
+using namespace std;
+using namespace ov::opset8;
+
+namespace ov {
+namespace frontend {
+namespace tensorflow {
+namespace op {
+OutputVector translate_broadcast_to_op(const NodeContext& node) {
+    default_op_checks(node, 2, {"BroadcastTo"});
+    auto input = node.get_input(0);
+    auto shape = node.get_input(1);
+    auto broadcast_to = make_shared<Broadcast>(input, shape);
+    set_node_name(node.get_name(), broadcast_to);
+    return {broadcast_to};
+}
+}  // namespace op
+}  // namespace tensorflow
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/tensorflow/src/op/bucketize.cpp b/src/frontends/tensorflow/src/op/bucketize.cpp
new file mode 100644
index 00000000000..5919d6c51ac
--- /dev/null
+++ b/src/frontends/tensorflow/src/op/bucketize.cpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "op_table.hpp"
+#include "openvino/opsets/opset8.hpp"
+
+using namespace std;
+using namespace ov::opset8;
+
+namespace ov {
+namespace frontend {
+namespace tensorflow {
+namespace op {
+OutputVector translate_bucketize_op(const NodeContext& node) {
+    default_op_checks(node, 1, {"Bucketize"});
+    auto input = node.get_input(0);
+
+    // retrieve attribute
+    auto boundaries = node.get_attribute<std::vector<float>>("boundaries");
+
+    auto bucketize =
+        make_shared<Bucketize>(input,
+                               make_shared<Constant>(ov::element::f32, Shape{boundaries.size()}, boundaries),
+                               ov::element::i32,
+                               false);
+    set_node_name(node.get_name(), bucketize);
+    return {bucketize};
+}
+}  // namespace op
+}  // namespace tensorflow
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/tensorflow/src/op/einsum.cpp b/src/frontends/tensorflow/src/op/einsum.cpp
new file mode 100644
index 00000000000..0a97c1ef6b9
--- /dev/null
+++ b/src/frontends/tensorflow/src/op/einsum.cpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "op_table.hpp"
+#include "openvino/opsets/opset8.hpp"
+
+using namespace std;
+using namespace ov::opset8;
+
+namespace ov {
+namespace frontend {
+namespace tensorflow {
+namespace op {
+OutputVector translate_einsum_op(const NodeContext& node) {
+    auto op_type = node.get_op_type();
+    TENSORFLOW_OP_VALIDATION(node, op_type == "Einsum", "Internal error: incorrect usage of translate_einsum_op.");
+    auto equation = node.get_attribute<std::string>("equation");
+
+    OutputVector inputs;
+    for (size_t input_ind = 0; input_ind < node.get_input_size(); ++input_ind) {
+        inputs.push_back(node.get_input(input_ind));
+    }
+
+    auto einsum = make_shared<Einsum>(inputs, equation);
+    set_node_name(node.get_name(), einsum);
+    return {einsum};
+}
+}  // namespace op
+}  // namespace tensorflow
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/tensorflow/src/op/reduce.cpp b/src/frontends/tensorflow/src/op/reduce.cpp
index 99a5a4fbb4c..a4b44abaef1 100644
--- a/src/frontends/tensorflow/src/op/reduce.cpp
+++ b/src/frontends/tensorflow/src/op/reduce.cpp
@@ -45,6 +45,7 @@ template OutputVector translate_direct_reduce_op<opset8::ReduceMean>(const NodeContext& node);
 template OutputVector translate_direct_reduce_op<opset8::ReduceMin>(const NodeContext& node);
 template OutputVector translate_direct_reduce_op<opset8::ReduceProd>(const NodeContext& node);
 template OutputVector translate_direct_reduce_op<opset8::ReduceSum>(const NodeContext& node);
+template OutputVector translate_direct_reduce_op<opset8::ReduceL2>(const NodeContext& node);
 }  // namespace op
 }  // namespace tensorflow
 }  // namespace frontend
diff --git a/src/frontends/tensorflow/src/op/unary_op.cpp b/src/frontends/tensorflow/src/op/unary_op.cpp
index 798b60bcf99..f8ab24bdd53 100644
--- a/src/frontends/tensorflow/src/op/unary_op.cpp
+++ b/src/frontends/tensorflow/src/op/unary_op.cpp
@@ -38,10 +38,12 @@ template OutputVector translate_unary_op<opset8::Atanh>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Ceiling>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Cos>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Cosh>(const NodeContext& node);
+template OutputVector translate_unary_op<opset8::Erf>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Exp>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Floor>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Log>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::LogicalNot>(const NodeContext& node);
+template OutputVector translate_unary_op<opset8::Mish>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Negative>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Relu>(const NodeContext& node);
 template OutputVector translate_unary_op<opset8::Sigmoid>(const NodeContext& node);
diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp
index f96060cd3a6..bc7017f38a5 100644
--- a/src/frontends/tensorflow/src/op_table.cpp
+++ b/src/frontends/tensorflow/src/op_table.cpp
@@ -28,6 +28,9 @@ OP_CONVERTER(translate_avg_pool_op);
 OP_CONVERTER(translate_batch_mat_mul_op);
 OP_CONVERTER(translate_batch_nd_and_space_nd_op);
 OP_CONVERTER(translate_bias_add_op);
+OP_CONVERTER(translate_broadcast_args_op);
+OP_CONVERTER(translate_broadcast_to_op);
+OP_CONVERTER(translate_bucketize_op);
 OP_CONVERTER(translate_cast_op);
 OP_CONVERTER(translate_concat_op);
 OP_CONVERTER(translate_const_op);
@@ -39,6 +42,7 @@ OP_CONVERTER(translate_cumsum_op);
 OP_CONVERTER(translate_crop_and_resize_op);
 OP_CONVERTER(translate_depth_to_space_op);
 OP_CONVERTER(translate_depthwise_conv_2d_native_op);
+OP_CONVERTER(translate_einsum_op);
 OP_CONVERTER(translate_elu_op);
 OP_CONVERTER(translate_expand_dims_op);
 OP_CONVERTER(translate_extract_image_patches_op);
@@ -115,10 +119,12 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         {"Ceil", translate_unary_op<opset8::Ceiling>},
         {"Cos", translate_unary_op<opset8::Cos>},
         {"Cosh", translate_unary_op<opset8::Cosh>},
+        {"Erf", translate_unary_op<opset8::Erf>},
         {"Exp", translate_unary_op<opset8::Exp>},
         {"Floor", translate_unary_op<opset8::Floor>},
         {"Log", translate_unary_op<opset8::Log>},
         {"LogicalNot", translate_unary_op<opset8::LogicalNot>},
+        {"Mish", translate_unary_op<opset8::Mish>},
         {"Neg", translate_unary_op<opset8::Negative>},
         {"Relu", translate_unary_op<opset8::Relu>},
         {"Sigmoid", translate_unary_op<opset8::Sigmoid>},
@@ -155,6 +161,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         // note: ReduceOp translator declaration for each op must to be added in reduce.cpp file
         {"Any", translate_direct_reduce_op<opset8::ReduceLogicalOr>},
         {"All", translate_direct_reduce_op<opset8::ReduceLogicalAnd>},
+        {"EuclideanNorm", translate_direct_reduce_op<opset8::ReduceL2>},
         {"Max", translate_direct_reduce_op<opset8::ReduceMax>},
         {"Mean", translate_direct_reduce_op<opset8::ReduceMean>},
         {"Min", translate_direct_reduce_op<opset8::ReduceMin>},
@@ -170,6 +177,9 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         {"BatchMatMul", translate_batch_mat_mul_op},
         {"BatchMatMulV2", translate_batch_mat_mul_op},
         {"BatchToSpaceND", translate_batch_nd_and_space_nd_op},
+        {"BroadcastArgs", translate_broadcast_args_op},
+        {"BroadcastTo", translate_broadcast_to_op},
+        {"Bucketize", translate_bucketize_op},
         {"BiasAdd", translate_bias_add_op},
         {"Cast", translate_cast_op},
         {"Concat", translate_concat_op},
@@ -183,6 +193,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         {"Cumsum", translate_cumsum_op},
         {"DepthToSpace", translate_depth_to_space_op},
         {"DepthwiseConv2dNative", translate_depthwise_conv_2d_native_op},
+        {"Einsum", translate_einsum_op},
         {"Elu", translate_elu_op},
         {"ExpandDims", translate_expand_dims_op},
         {"ExtractImagePatches", translate_extract_image_patches_op},
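
For reference, TensorFlow's BroadcastArgs returns the NumPy-style broadcast of two shape vectors: shapes are compared dimension by dimension, a dimension of 1 adopts the other shape's extent, and any other mismatch is an error. The following is a minimal standalone sketch of that rule for static shapes; it is illustrative only and not part of the patch, and broadcast_args here is a hypothetical helper, not the translator above.

// Standalone illustration of the shape-broadcasting rule BroadcastArgs implements
// for static shapes (illustrative only, not part of the patch).
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

// Align the two shapes from the right, treat missing dimensions as 1, and take
// the larger extent of each pair; extents of 1 broadcast to the other extent.
std::vector<int64_t> broadcast_args(std::vector<int64_t> s0, std::vector<int64_t> s1) {
    if (s0.size() < s1.size())
        std::swap(s0, s1);
    std::vector<int64_t> result = s0;
    const size_t offset = s0.size() - s1.size();
    for (size_t i = 0; i < s1.size(); ++i) {
        const int64_t a = s0[offset + i];
        const int64_t b = s1[i];
        if (a != b && a != 1 && b != 1)
            throw std::runtime_error("shapes are not broadcastable");
        result[offset + i] = std::max(a, b);
    }
    return result;
}

int main() {
    // broadcast_args([5, 1, 3], [4, 3]) -> [5, 4, 3]
    for (int64_t d : broadcast_args({5, 1, 3}, {4, 3}))
        std::cout << d << " ";
    std::cout << "\n";
    return 0;
}

The translator above builds the result with graph operations instead: the shorter shape vector is padded with -1 values (so dynamic dimensions survive the comparison), and an element-wise Maximum over the two padded vectors selects the broadcast extent for each position.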
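The Bucketize translator forwards the boundaries attribute to opset8::Bucketize with output type i32 and with_right_bound set to false, i.e. a value equal to a boundary falls into the bucket that starts at that boundary. A small standalone sketch of that indexing rule follows; it is illustrative only, and bucket_index is a hypothetical helper, not part of the patch.

// Standalone sketch of the bucket-index rule selected by with_right_bound = false:
// the output is the index of the first boundary strictly greater than the value.
#include <algorithm>
#include <iostream>
#include <vector>

int bucket_index(float value, const std::vector<float>& boundaries) {
    // std::upper_bound returns the first boundary > value, so a value equal to a
    // boundary is assigned to the bucket that begins at that boundary.
    return static_cast<int>(std::upper_bound(boundaries.begin(), boundaries.end(), value) - boundaries.begin());
}

int main() {
    const std::vector<float> boundaries = {0.0f, 10.0f, 100.0f};
    for (float v : {-5.0f, 10.0f, 150.0f})
        std::cout << bucket_index(v, boundaries) << " ";  // prints: 0 2 3
    std::cout << "\n";
    return 0;
}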