[TF FE] Speed up compilation - part 5

Avoid usage of heavy opsetX headers

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
Kazantsev, Roman 2023-11-26 12:47:55 +04:00
parent 493a338ad2
commit c8c7066576
9 changed files with 107 additions and 84 deletions
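
The change applies one pattern across all nine translator files: the umbrella "openvino/opsets/opsetX.hpp" includes (which pull in declarations for every operation of an opset) are replaced with per-operation headers under "openvino/op/", and operations are referenced through their versioned namespaces (v0::Tile, v11::TopK, ...) via "using namespace ov::op;" instead of "using namespace ov::opsetX;". Below is a minimal sketch of the same pattern outside the frontend code, assuming only a standard OpenVINO development setup; the tile_twice helper is illustrative and not part of the commit.

// Before: #include "openvino/opsets/opset8.hpp" pulled in every op of the opset.
// After: include only the operations that are actually used.
#include <memory>
#include <vector>

#include "openvino/op/constant.hpp"  // ov::op::v0::Constant
#include "openvino/op/tile.hpp"      // ov::op::v0::Tile

using namespace ov::op;  // versioned namespaces v0, v1, ... instead of ov::opset8

// Illustrative helper (not from the commit): tile a 1D input twice along axis 0.
std::shared_ptr<ov::Node> tile_twice(const ov::Output<ov::Node>& input) {
    // repeats = [2]: repeat the single dimension two times
    auto repeats = v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector<int64_t>{2});
    return std::make_shared<v0::Tile>(input, repeats);
}

Naming the versioned namespace at each call site also keeps the opset version of every operation explicit, so later opset bumps show up as visible changes in review.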

View File

@@ -2,11 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/tile.hpp"
#include "common_op_table.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ov::opset8;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -18,7 +19,7 @@ OutputVector translate_tile_op(const NodeContext& node) {
auto input = node.get_input(0);
auto multiples = node.get_input(1);
-auto tile = make_shared<Tile>(input, multiples);
+auto tile = make_shared<v0::Tile>(input, multiples);
set_node_name(node.get_name(), tile);
return {tile};
}

View File

@@ -3,10 +3,10 @@
//
#include "common_op_table.hpp"
#include "openvino/opsets/opset11.hpp"
#include "openvino/op/topk.hpp"
using namespace std;
using namespace ov::opset11;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -20,20 +20,20 @@ NamedOutputVector translate_top_k_base_op(const NodeContext& node,
// retrieve k attribute
bool sorted = node.get_attribute<bool>("sorted", true);
-auto top_k = make_shared<TopK>(input,
-k_input,
--1,
-ov::op::v1::TopK::Mode::MAX,
-sorted ? TopK::SortType::SORT_VALUES : TopK::SortType::SORT_INDICES,
-ov::element::i32,
-true);
+auto top_k = make_shared<v11::TopK>(input,
+k_input,
+-1,
+ov::op::v11::TopK::Mode::MAX,
+sorted ? v11::TopK::SortType::SORT_VALUES : v11::TopK::SortType::SORT_INDICES,
+ov::element::i32,
+true);
set_node_name(node.get_name(), top_k);
return {{"values", top_k->output(0)}, {"indices", top_k->output(1)}};
}
NamedOutputVector translate_top_k_op(const NodeContext& node) {
// retrieve k attribute
auto k = node.get_attribute<int64_t>("k");
-auto k_input = make_shared<Constant>(ov::element::i64, Shape{}, std::vector<int64_t>({k}));
+auto k_input = make_shared<v0::Constant>(ov::element::i64, Shape{}, std::vector<int64_t>({k}));
return translate_top_k_base_op(node, k_input, 1);
}

View File

@@ -2,11 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/transpose.hpp"
#include "common_op_table.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ov::opset8;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -17,7 +18,7 @@ OutputVector translate_transpose_op(const NodeContext& node) {
default_op_checks(node, 2, {"Transpose", "TRANSPOSE"});
auto x = node.get_input(0);
auto perm = node.get_input(1);
-auto transpose = make_shared<Transpose>(x, perm);
+auto transpose = make_shared<v1::Transpose>(x, perm);
set_node_name(node.get_name(), transpose);
return {transpose};
}

View File

@@ -2,11 +2,13 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/unique.hpp"
#include "common_op_table.hpp"
#include "openvino/opsets/opset10.hpp"
using namespace std;
using namespace ov;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -20,7 +22,7 @@ NamedOutputVector translate_unique_op(const NodeContext& node) {
auto node_name = node.get_name();
auto input_values = node.get_input(0);
auto output_indices_type = node.get_attribute<ov::element::Type>("out_idx", ov::element::i32);
-auto unique = make_shared<opset10::Unique>(input_values, false, output_indices_type);
+auto unique = make_shared<v10::Unique>(input_values, false, output_indices_type);
// set up new Unique node name and tensor names manually
// because the second and fourth outputs of OpenVINO Unique are not needed

View File

@@ -3,10 +3,12 @@
//
#include "common_op_table.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/split.hpp"
#include "openvino/op/squeeze.hpp"
using namespace std;
using namespace ov::opset8;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -19,11 +21,11 @@ OutputVector translate_unpack_op(const NodeContext& node) {
auto axis = node.get_attribute<int64_t>("axis", 0);
auto num = node.get_attribute<int64_t>("num");
-auto axis_const = make_shared<Constant>(element::i64, Shape{}, axis);
-auto split = make_shared<Split>(value, axis_const, num);
+auto axis_const = make_shared<v0::Constant>(element::i64, Shape{}, axis);
+auto split = make_shared<v1::Split>(value, axis_const, num);
OutputVector unpack_outputs;
for (int output_ind = 0; output_ind < num; ++output_ind) {
-auto unpack_output = make_shared<Squeeze>(split->output(output_ind), axis_const);
+auto unpack_output = make_shared<v0::Squeeze>(split->output(output_ind), axis_const);
set_out_name(node.get_name() + ":" + to_string(output_ind), unpack_output);
unpack_outputs.push_back(unpack_output);
}

View File

@@ -3,11 +3,13 @@
//
#include "common_op_table.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/non_zero.hpp"
#include "openvino/op/transpose.hpp"
using namespace std;
using namespace ov;
using namespace ov::opset8;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -17,9 +19,9 @@ namespace op {
OutputVector translate_where_op(const NodeContext& node) {
default_op_checks(node, 1, {"Where"});
auto condition = node.get_input(0);
-auto non_zero = make_shared<NonZero>(condition, element::i64);
-auto transpose_order = make_shared<Constant>(element::i32, Shape{2}, vector<int32_t>{1, 0});
-auto res = make_shared<opset8::Transpose>(non_zero, transpose_order);
+auto non_zero = make_shared<v3::NonZero>(condition, element::i64);
+auto transpose_order = make_shared<v0::Constant>(element::i32, Shape{2}, vector<int32_t>{1, 0});
+auto res = make_shared<v1::Transpose>(non_zero, transpose_order);
set_node_name(node.get_name(), res);
return res->outputs();
}

View File

@@ -3,11 +3,13 @@
//
#include "common_op_table.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/select.hpp"
#include "utils.hpp"
using namespace std;
using namespace ov::opset8;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -23,9 +25,9 @@ OutputVector translate_x_div_y_op(const NodeContext& node) {
auto const_zero = create_same_type_const_scalar<int32_t>(x, 0);
auto const_one = create_same_type_const_scalar<int32_t>(x, 1);
-auto x_is_zero = make_shared<Equal>(x, const_zero);
-auto select = make_shared<Select>(x_is_zero, const_one, y);
-auto xdivy = make_shared<Divide>(x, select);
+auto x_is_zero = make_shared<v1::Equal>(x, const_zero);
+auto select = make_shared<v1::Select>(x_is_zero, const_one, y);
+auto xdivy = make_shared<v1::Divide>(x, select);
set_node_name(node.get_name(), xdivy);
return {xdivy};
}

View File

@@ -3,11 +3,15 @@
//
#include "common_op_table.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/squeeze.hpp"
#include "utils.hpp"
using namespace std;
using namespace ov::opset8;
using namespace ov::op;
namespace ov {
namespace frontend {
@@ -17,20 +21,20 @@ namespace op {
OutputVector translate_zeros_like_op(const NodeContext& node) {
default_op_checks(node, 1, {"ZerosLike", "ZEROS_LIKE"});
auto x = node.get_input(0);
-Output<Node> shape_of = make_shared<ShapeOf>(x, element::i32);
+Output<Node> shape_of = make_shared<v3::ShapeOf>(x, element::i32);
auto zero_const = create_same_type_const_scalar<int32_t>(x, 0);
// in case x is a scalar, we need to handle it more specifically
// since Broadcast supports only broadcasting to rank greater than 0
// we have to introduce an extra dimension for the input scalar case
-auto zero_int_const = make_shared<Constant>(element::i32, Shape{1}, 0);
-auto one_int_const = make_shared<Constant>(element::i32, Shape{1}, 1);
-shape_of = make_shared<Concat>(OutputVector{one_int_const, shape_of}, 0);
+auto zero_int_const = make_shared<v0::Constant>(element::i32, Shape{1}, 0);
+auto one_int_const = make_shared<v0::Constant>(element::i32, Shape{1}, 1);
+shape_of = make_shared<v0::Concat>(OutputVector{one_int_const, shape_of}, 0);
// create a tensor of zeros of shape with extra dimension
-Output<Node> zeros_like = make_shared<Broadcast>(zero_const, shape_of);
+Output<Node> zeros_like = make_shared<v3::Broadcast>(zero_const, shape_of);
// remove extra dimension by squeezing
-zeros_like = make_shared<Squeeze>(zeros_like, zero_int_const);
+zeros_like = make_shared<v0::Squeeze>(zeros_like, zero_int_const);
set_node_name(node.get_name(), zeros_like.get_node_shared_ptr());
return {zeros_like};

View File

@@ -8,11 +8,20 @@
#include "common_op_table.hpp"
#include "helper_ops/complex_type_mark.hpp"
#include "openvino/opsets/opset10.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/maximum.hpp"
#include "openvino/op/pad.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/slice.hpp"
#include "openvino/op/subtract.hpp"
#include "openvino/op/transpose.hpp"
using namespace ov;
using namespace ov::op;
using namespace ov::opset10;
using namespace std;
using namespace ov::frontend::tensorflow;
@@ -213,29 +222,29 @@ OutputVector translate_convolution_op(const frontend::NodeContext& node, size_t
// grouped convolutional operation
// compute input channels given from the input and the filter
// and number of groups required to split the filter
-auto input_shape = make_shared<ShapeOf>(input, element::i32);
-auto filter_shape = make_shared<ShapeOf>(filter, element::i32);
-auto zero_const = make_shared<Constant>(element::i32, Shape{1}, 0);
-auto one_const = make_shared<Constant>(element::i32, Shape{1}, 1);
-auto two_const = make_shared<Constant>(element::i32, Shape{1}, 2);
-auto input_cin = make_shared<Slice>(input_shape, one_const, two_const, one_const);
-auto filter_cin = make_shared<Slice>(filter_shape, one_const, two_const, one_const);
-auto num_groups = make_shared<Divide>(input_cin, filter_cin);
+auto input_shape = make_shared<v3::ShapeOf>(input, element::i32);
+auto filter_shape = make_shared<v3::ShapeOf>(filter, element::i32);
+auto zero_const = make_shared<v0::Constant>(element::i32, Shape{1}, 0);
+auto one_const = make_shared<v0::Constant>(element::i32, Shape{1}, 1);
+auto two_const = make_shared<v0::Constant>(element::i32, Shape{1}, 2);
+auto input_cin = make_shared<v8::Slice>(input_shape, one_const, two_const, one_const);
+auto filter_cin = make_shared<v8::Slice>(filter_shape, one_const, two_const, one_const);
+auto num_groups = make_shared<v1::Divide>(input_cin, filter_cin);
// reshape the filter based on the number of groups information
-auto int_max_const = make_shared<Constant>(element::i32, Shape{1}, numeric_limits<int>::max());
-auto filter_cout = make_shared<Slice>(filter_shape, zero_const, one_const, one_const);
-auto filter_new_cout = make_shared<Divide>(filter_cout, num_groups);
-auto shape_cin_xy = make_shared<Slice>(filter_shape, one_const, int_max_const, one_const);
-auto filter_new_shape = make_shared<Concat>(OutputVector{num_groups, filter_new_cout, shape_cin_xy}, 0);
-auto new_filter = make_shared<Reshape>(filter, filter_new_shape, false);
-conv = make_shared<GroupConvolution>(input, new_filter, strides, pads_begin, pads_end, dilations, auto_pad);
+auto int_max_const = make_shared<v0::Constant>(element::i32, Shape{1}, numeric_limits<int>::max());
+auto filter_cout = make_shared<v8::Slice>(filter_shape, zero_const, one_const, one_const);
+auto filter_new_cout = make_shared<v1::Divide>(filter_cout, num_groups);
+auto shape_cin_xy = make_shared<v8::Slice>(filter_shape, one_const, int_max_const, one_const);
+auto filter_new_shape = make_shared<v0::Concat>(OutputVector{num_groups, filter_new_cout, shape_cin_xy}, 0);
+auto new_filter = make_shared<v1::Reshape>(filter, filter_new_shape, false);
+conv = make_shared<v1::GroupConvolution>(input, new_filter, strides, pads_begin, pads_end, dilations, auto_pad);
} else {
// assumption to use regular convolution for all other cases is taken from the legacy frontend
// this solution is sufficient for all observed models in the validation
// in general, it has limitations and needs to use grouped convolution when num_groups is not static
// 118107: remove this assumption when it obtains complete shape propagation in the core
-conv = make_shared<Convolution>(input, filter, strides, pads_begin, pads_end, dilations, auto_pad);
+conv = make_shared<v1::Convolution>(input, filter, strides, pads_begin, pads_end, dilations, auto_pad);
}
convert_nchw_to_nhwc(is_nhwc, conv, Rank(spatial_dims_num + 2));
@@ -286,9 +295,9 @@ Output<Node> get_elements_number_1d(const Output<Node>& output, element::Type ou
FRONT_END_OP_CONVERSION_CHECK(false,
"Internal error: get_elements_number_1d method supports only 1D input tensor.");
}
-auto shape = rg.make<ShapeOf>(output, output_type);
-auto const_zero = make_shared<Constant>(element::i32, Shape{}, 0);
-auto num_elements = rg.make<Squeeze>(shape, const_zero);
+auto shape = rg.make<v3::ShapeOf>(output, output_type);
+auto const_zero = make_shared<v0::Constant>(element::i32, Shape{}, 0);
+auto num_elements = rg.make<v0::Squeeze>(shape, const_zero);
return num_elements;
}
@@ -315,12 +324,12 @@ PadMode convert_padding_mode(const NodeContext& node, const string& padding_mode
}
Output<Node> compute_subgraph_scalar_rank(const Output<Node>& output, element::Type output_type, bool as_scalar) {
-auto shape_of = make_shared<ShapeOf>(output, output_type);
-auto rank_of = make_shared<ShapeOf>(shape_of, output_type);
+auto shape_of = make_shared<v3::ShapeOf>(output, output_type);
+auto rank_of = make_shared<v3::ShapeOf>(shape_of, output_type);
if (as_scalar) {
-auto const_zero = make_shared<Constant>(element::i32, Shape{}, 0);
-return make_shared<Squeeze>(rank_of, const_zero);
+auto const_zero = make_shared<v0::Constant>(element::i32, Shape{}, 0);
+return make_shared<v0::Squeeze>(rank_of, const_zero);
}
return rank_of;
}
@@ -361,41 +370,41 @@ void convert_nchw_to_nhwc(bool need_convert, Output<Node>& node, Rank input_rank
}
}
-shared_ptr<Transpose> make_transpose(const Output<Node>& arg, const AxisVector& input_order) {
-auto order = make_shared<Constant>(element::i64, Shape{input_order.size()}, input_order);
-auto transpose = make_shared<Transpose>(arg, order);
+shared_ptr<v1::Transpose> make_transpose(const Output<Node>& arg, const AxisVector& input_order) {
+auto order = make_shared<v0::Constant>(element::i64, Shape{input_order.size()}, input_order);
+auto transpose = make_shared<v1::Transpose>(arg, order);
return transpose;
}
-shared_ptr<Reshape> make_reshape(const Output<Node>& arg, const vector<int64_t>& new_shape) {
-auto new_shape_node = make_shared<Constant>(element::i64, Shape{new_shape.size()}, new_shape);
-auto reshape = make_shared<Reshape>(arg, new_shape_node, true);
+shared_ptr<v1::Reshape> make_reshape(const Output<Node>& arg, const vector<int64_t>& new_shape) {
+auto new_shape_node = make_shared<v0::Constant>(element::i64, Shape{new_shape.size()}, new_shape);
+auto reshape = make_shared<v1::Reshape>(arg, new_shape_node, true);
return reshape;
}
Output<Node> get_data_slice(const Output<Node>& data, const int64_t& start, const int64_t& stop, const int64_t& step) {
-auto start_const = make_shared<Constant>(element::i64, Shape{1}, start);
-auto stop_const = make_shared<Constant>(element::i64, Shape{1}, stop);
-auto step_const = make_shared<Constant>(element::i64, Shape{1}, step);
-return make_shared<Slice>(data, start_const, stop_const, step_const)->output(0);
+auto start_const = make_shared<v0::Constant>(element::i64, Shape{1}, start);
+auto stop_const = make_shared<v0::Constant>(element::i64, Shape{1}, stop);
+auto step_const = make_shared<v0::Constant>(element::i64, Shape{1}, step);
+return make_shared<v8::Slice>(data, start_const, stop_const, step_const)->output(0);
}
Output<Node> compute_broadcast_args(const Output<Node>& shape1, const Output<Node>& shape2) {
// compute a number of shape elements to append for broadcasting
-auto size0 = make_shared<ShapeOf>(shape1);
-auto size1 = make_shared<ShapeOf>(shape2);
-auto max_size = make_shared<Maximum>(size0, size1);
-auto diff1 = make_shared<Subtract>(max_size, size0);
-auto diff2 = make_shared<Subtract>(max_size, size1);
+auto size0 = make_shared<v3::ShapeOf>(shape1);
+auto size1 = make_shared<v3::ShapeOf>(shape2);
+auto max_size = make_shared<v1::Maximum>(size0, size1);
+auto diff1 = make_shared<v1::Subtract>(max_size, size0);
+auto diff2 = make_shared<v1::Subtract>(max_size, size1);
// pad the shortest shape value with minus ones
// to take dynamic shapes into account
auto const_zero = create_same_type_const<int64_t>(diff1, std::vector<int64_t>{0}, Shape{1});
auto const_one = create_same_type_const_scalar<int64_t>(shape1, 1);
-auto padded_s0 = make_shared<Pad>(shape1, diff1, const_zero, const_one, ov::op::PadMode::CONSTANT);
-auto padded_s1 = make_shared<Pad>(shape2, diff2, const_zero, const_one, ov::op::PadMode::CONSTANT);
+auto padded_s0 = make_shared<v1::Pad>(shape1, diff1, const_zero, const_one, ov::op::PadMode::CONSTANT);
+auto padded_s1 = make_shared<v1::Pad>(shape2, diff2, const_zero, const_one, ov::op::PadMode::CONSTANT);
-auto broadcasted_shape = make_shared<Maximum>(padded_s0, padded_s1);
+auto broadcasted_shape = make_shared<v1::Maximum>(padded_s0, padded_s1);
return broadcasted_shape->output(0);
}
} // namespace tensorflow