[TF FE] Refactor MaxPool operation translator for xj_feature model (#12485)
* [TF FE] Refactor MaxPool operation translator for xj_feature model

  Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Correct MaxPoolV2 since it has three inputs

  Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
Parent: 4dc713c7eb
Commit: f23fd569bc
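The patch renames convert_conv_tf_padding to convert_tf_padding, extends it to the MaxPool family, and splits the monolithic MaxPool translator into a shared helper (translate_max_pool_util) plus thin per-op wrappers selected by op type; MaxPoolV2 reads ksize and strides from its second and third inputs instead of attributes. A minimal standalone sketch of that dispatch shape, with illustrative names only (the real code works on NodeContext and returns an OutputVector):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Illustrative stand-in: the real translators take a NodeContext and return
    // an OutputVector; a string result is enough to show the dispatch shape.
    std::string translate_pool(const std::string& op, int spatial_dims) {
        return op + " lowered with " + std::to_string(spatial_dims) + " spatial dims";
    }

    std::string translate_max_pool_op_sketch(const std::string& op_type) {
        if (op_type == "MaxPool") {
            return translate_pool(op_type, 2);
        } else if (op_type == "MaxPoolV2") {
            // MaxPoolV2 would read ksize and strides from inputs 1 and 2 before this call.
            return translate_pool(op_type, 2);
        } else if (op_type == "MaxPool3D") {
            return translate_pool(op_type, 3);
        }
        throw std::runtime_error("Only MaxPool, MaxPoolV2 and MaxPool3D are supported.");
    }

    int main() {
        for (const char* op : {"MaxPool", "MaxPoolV2", "MaxPool3D"}) {
            std::cout << translate_max_pool_op_sketch(op) << '\n';
        }
        return 0;
    }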
@@ -24,7 +24,7 @@ OutputVector translate_conv_2d_backprop_input_op(const NodeContext& node) {
     // retrieve attributes for Conv2DBackpropInput
     auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
     auto tf_padding_type = node.get_attribute<std::string>("padding");
-    ov::op::PadType auto_pad = convert_conv_tf_padding(node, tf_padding_type);
+    ov::op::PadType auto_pad = convert_tf_padding(node, tf_padding_type);
 
     // retrieve optional attributes
     auto tf_dilations = node.get_attribute<std::vector<int64_t>>("dilations", {1, 1, 1, 1});
@@ -22,7 +22,7 @@ OutputVector translate_conv_3d_backprop_input_v2_op(const NodeContext& node) {
     // retrieve attributes for Conv3DBackpropInput
     auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
     auto tf_padding_type = node.get_attribute<std::string>("padding");
-    ov::op::PadType auto_pad = convert_conv_tf_padding(node, tf_padding_type);
+    ov::op::PadType auto_pad = convert_tf_padding(node, tf_padding_type);
 
     // retrieve optional attributes
     auto tf_dilations = node.get_attribute<std::vector<int64_t>>("dilations", {1, 1, 1, 1, 1});
@@ -3,7 +3,8 @@
 //
 
 #include "op_table.hpp"
-#include "openvino/opsets/opset7.hpp"
+#include "openvino/opsets/opset8.hpp"
+#include "utils.hpp"
 
 using namespace std;
 using namespace ov;
@@ -14,56 +15,103 @@ namespace frontend {
 namespace tensorflow {
 namespace op {
 
-OutputVector translate_max_pool_op(const NodeContext& node) {
-    auto ng_input = node.get_input(0);
+OutputVector translate_max_pool_util(const NodeContext& node,
+                                     size_t spatial_dims_num,
+                                     const std::vector<int64_t>& tf_kernel_sizes,
+                                     const std::vector<int64_t>& tf_strides) {
+    TENSORFLOW_OP_VALIDATION(node, node.get_input_size() > 0, "MaxPool operation must have at least one input.");
+    TENSORFLOW_OP_VALIDATION(node,
+                             spatial_dims_num == 2 || spatial_dims_num == 3,
+                             "Only MaxPool2D and MaxPool3D are supported.");
+    auto input = node.get_input(0);
 
-    auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
-    auto tf_ksize = node.get_attribute<std::vector<int64_t>>("ksize");
     auto tf_padding_type = node.get_attribute<std::string>("padding");
-    auto tf_data_format = node.get_attribute<std::string>("data_format");
+    ov::op::PadType auto_pad = convert_tf_padding(node, tf_padding_type);
+    auto tf_data_format = node.get_attribute<std::string>("data_format", spatial_dims_num == 2 ? "NHWC" : "NDHWC");
 
-    bool is_nhwc = (tf_data_format == "NHWC") || (tf_data_format == "NDHWC");
-
-    int N = 2;
-    if (node.get_op_type() == "MaxPool3D") {
-        N = 3;
+    auto tf_explicit_paddings = std::vector<int64_t>{};
+    if (auto_pad == ov::op::PadType::EXPLICIT) {
+        tf_explicit_paddings = node.get_attribute<std::vector<int64_t>>("explicit_paddings", {});
     }
-    Strides ng_strides(N);
-    Shape ng_image_shape(N);
-    Shape ng_kernel_shape(N);
-    Shape ng_dilations(N, 1);
 
-    convert_nhwc_to_hw(is_nhwc, tf_strides, ng_strides);
-    convert_nhwc_to_hw(is_nhwc, ng_input.get_shape(), ng_image_shape);
-    convert_nhwc_to_hw(is_nhwc, tf_ksize, ng_kernel_shape);
-    convert_nhwc_to_nchw(is_nhwc, ng_input);
+    bool is_nhwc = true;
+    if (spatial_dims_num == 2) {
+        TENSORFLOW_OP_VALIDATION(node,
+                                 tf_data_format == "NHWC" || tf_data_format == "NCHW",
+                                 "MaxPool2D or MaxPoolV2 data format is neither NHWC nor NCHW");
+        is_nhwc = (tf_data_format == "NHWC");
+    } else {
+        TENSORFLOW_OP_VALIDATION(node,
+                                 tf_data_format == "NDHWC" || tf_data_format == "NCDHW",
+                                 "MaxPool3D data format is neither NDHWC nor NCDHW");
+        is_nhwc = (tf_data_format == "NDHWC");
+    }
 
-    CoordinateDiff padding_below;
-    CoordinateDiff padding_above;
-    make_padding(tf_padding_type,
-                 ng_image_shape,
-                 ng_kernel_shape,
-                 ng_strides,
-                 ng_dilations,
-                 padding_below,
-                 padding_above);
+    // prepare attributes for OpenVINO MaxPool operation
+    ov::Strides strides(spatial_dims_num);
+    ov::Strides dilations = (spatial_dims_num == 2 ? ov::Strides({1, 1}) : ov::Strides({1, 1, 1}));
+    ov::Shape kernel_sizes(spatial_dims_num);
+    ov::frontend::tensorflow::convert_nhwc_to_hw(is_nhwc, tf_strides, strides);
+    ov::frontend::tensorflow::convert_nhwc_to_hw(is_nhwc, tf_kernel_sizes, kernel_sizes);
 
-    // TODO: remove this once OV supports negative padding
-    // (CoordinateDiff) for MaxPool
-    Shape ng_padding_below(padding_below.begin(), padding_below.end());
-    Shape ng_padding_above(padding_above.begin(), padding_above.end());
+    ov::CoordinateDiff pads_begin;
+    ov::CoordinateDiff pads_end;
+    if (auto_pad == ov::op::PadType::EXPLICIT) {
+        fill_explicit_pads_vectors(node, is_nhwc, spatial_dims_num, tf_explicit_paddings, pads_begin, pads_end);
+    }
 
-    auto res_node = make_shared<ov::opset7::MaxPool>(ng_input,
-                                                     ng_strides,
-                                                     ng_padding_below,
-                                                     ng_padding_above,
-                                                     ng_kernel_shape,
-                                                     ov::op::RoundingType::FLOOR);
-    auto res = res_node->output(0);
+    // prepare input to MaxPool
+    convert_nhwc_to_nchw(is_nhwc, input);
 
-    convert_nchw_to_nhwc(is_nhwc, res);
-    set_node_name(node.get_name(), res.get_node_shared_ptr());
-    return {res};
+    auto max_pool_node = std::make_shared<ov::opset8::MaxPool>(input,
+                                                               strides,
+                                                               dilations,
+                                                               ov::Shape(pads_begin.begin(), pads_begin.end()),
+                                                               ov::Shape(pads_end.begin(), pads_end.end()),
+                                                               kernel_sizes,
+                                                               ov::op::RoundingType::FLOOR,
+                                                               auto_pad);
+    auto max_pool = max_pool_node->output(0);
+    ov::frontend::tensorflow::convert_nchw_to_nhwc(is_nhwc, max_pool);
+    ov::frontend::tensorflow::set_node_name(node.get_name(), max_pool.get_node_shared_ptr());
+    return {max_pool};
 }
+
+OutputVector translate_max_pool(const NodeContext& node, size_t spatial_dims_num) {
+    // MaxPool2D and MaxPool3D have ksize and strides as attributes
+    // retrieve attributes
+    auto strides = node.get_attribute<std::vector<int64_t>>("strides");
+    auto kernel_sizes = node.get_attribute<std::vector<int64_t>>("ksize");
+    return translate_max_pool_util(node, spatial_dims_num, kernel_sizes, strides);
+}
+
+OutputVector translate_max_pool_v2(const NodeContext& node) {
+    // MaxPoolV2 has ksize and strides as input parameters
+    TENSORFLOW_OP_VALIDATION(node, node.get_input_size() > 2, "MaxPoolV2 operation must have at least three inputs.");
+    auto ksize = node.get_input(1);
+    auto strides = node.get_input(2);
+
+    auto ksize_constant = get_constant_from_source(ksize);
+    TENSORFLOW_OP_VALIDATION(node, ksize_constant, "MaxPoolV2 is supported only with constant ksize.");
+    auto strides_constant = get_constant_from_source(strides);
+    TENSORFLOW_OP_VALIDATION(node, strides_constant, "MaxPoolV2 is supported only with constant strides.");
+
+    auto ksize_vector = ksize_constant->cast_vector<int64_t>();
+    auto strides_vector = strides_constant->cast_vector<int64_t>();
+
+    return translate_max_pool_util(node, 2, ksize_vector, strides_vector);
+}
+
+OutputVector translate_max_pool_op(const NodeContext& node) {
+    if (node.get_op_type() == "MaxPool") {
+        return translate_max_pool(node, 2);
+    } else if (node.get_op_type() == "MaxPoolV2") {
+        return translate_max_pool_v2(node);
+    } else if (node.get_op_type() == "MaxPool3D") {
+        return translate_max_pool(node, 3);
+    } else {
+        TENSORFLOW_OP_VALIDATION(node, false, "Only MaxPool2D, MaxPoolV2 and MaxPool3D are supported.");
+    }
+}
 
 } // namespace op
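The rewritten helper above leans on convert_nhwc_to_hw and convert_nhwc_to_nchw from the frontend utilities, whose bodies are not part of this diff. As a rough sketch of what the first of them is expected to do with per-dimension vectors such as ksize and strides (an assumption based on how it is called, not the frontend's actual implementation):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Illustrative only: copy the spatial entries of a per-dimension vector
    // (such as ksize or strides) given the TensorFlow layout. In NHWC/NDHWC the
    // spatial values sit between the batch and channel entries; in NCHW/NCDHW
    // they follow batch and channels.
    std::vector<int64_t> spatial_only(const std::vector<int64_t>& full, bool is_nhwc) {
        assert(full.size() >= 3);
        const std::size_t spatial = full.size() - 2;  // drop batch and channel entries
        std::vector<int64_t> out(spatial);
        for (std::size_t i = 0; i < spatial; ++i) {
            out[i] = is_nhwc ? full[i + 1] : full[i + 2];
        }
        return out;
    }

    int main() {
        // ksize = {1, 3, 3, 1} for an NHWC MaxPool gives a {3, 3} spatial kernel
        for (int64_t v : spatial_only({1, 3, 3, 1}, true)) std::cout << v << ' ';
        std::cout << '\n';
        // strides = {1, 1, 2, 2} for an NCHW MaxPool gives {2, 2} spatial strides
        for (int64_t v : spatial_only({1, 1, 2, 2}, false)) std::cout << v << ' ';
        std::cout << '\n';
        return 0;
    }

The MaxPool node itself is then built on the NCHW-ordered input and its output is converted back to the original layout, as the hunk above shows.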
@@ -200,6 +200,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         {"LRN", translate_lrn_op},
         {"MatMul", translate_mat_mul_op},
         {"MaxPool", translate_max_pool_op},
+        {"MaxPoolV2", translate_max_pool_op},
         {"MaxPool3D", translate_max_pool_op},
         {"MirrorPad", translate_pad_op},
         {"NonMaxSuppression", translate_non_max_suppression_op},
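The op_table change only points one more TensorFlow op name at the same creator function. A small sketch of that lookup pattern, with hypothetical handler lambdas rather than the frontend's CreatorFunction signature:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        // Hypothetical stand-in for the frontend's creator-function table.
        const std::map<std::string, std::function<void()>> supported_ops = {
            {"MaxPool", [] { std::cout << "translate MaxPool\n"; }},
            {"MaxPoolV2", [] { std::cout << "translate MaxPoolV2 via the same translator\n"; }},
            {"MaxPool3D", [] { std::cout << "translate MaxPool3D\n"; }},
        };

        const auto it = supported_ops.find("MaxPoolV2");
        if (it != supported_ops.end()) {
            it->second();  // found: run the shared translator
        } else {
            std::cout << "op is not supported by this dispatch table\n";
        }
        return 0;
    }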
@@ -32,13 +32,14 @@ void ov::frontend::tensorflow::set_out_name(const std::string& out_name, const o
     output.get_tensor().add_names({out_name});
 }
 
-ov::op::PadType ov::frontend::tensorflow::convert_conv_tf_padding(const ov::frontend::tensorflow::NodeContext& node,
+ov::op::PadType ov::frontend::tensorflow::convert_tf_padding(const ov::frontend::tensorflow::NodeContext& node,
                                                                   const std::string& tf_padding) {
     auto op_type = node.get_op_type();
 
     TENSORFLOW_OP_VALIDATION(node,
                              op_type == "Conv2D" || op_type == "Conv2DBackpropInput" || op_type == "Conv3D" ||
-                                 op_type == "Conv3DBackpropInputV2",
+                                 op_type == "Conv3DBackpropInputV2" || op_type == "MaxPool" || op_type == "MaxPoolV2" ||
+                                 op_type == "MaxPool3D",
                              "The convert_conv_tf_padding routine supports only convolutional operations.");
     TENSORFLOW_OP_VALIDATION(
         node,
@@ -48,14 +49,15 @@ ov::op::PadType ov::frontend::tensorflow::convert_conv_tf_padding(const ov::fron
     if (tf_padding == "VALID") {
         return ov::op::PadType::VALID;
     }
-    if (node.get_op_type() == "Conv2DBackpropInput" || node.get_op_type() == "Conv3DBackpropInputV2") {
+    if (op_type == "Conv2DBackpropInput" || op_type == "Conv3DBackpropInputV2") {
         if (tf_padding == "SAME") {
             // According to the formulas for calculating auto_pad values of the
             // ConvBackpropData layer in the Operation specification,
             // the SAME_LOWER value matches to the SAME value in TensorFlow
             return ov::op::PadType::SAME_LOWER;
         }
-    } else if (node.get_op_type() == "Conv2D" || node.get_op_type() == "Conv3D") {
+    } else if (op_type == "Conv2D" || op_type == "Conv3D" || op_type == "MaxPool" || op_type == "MaxPoolV2" ||
+               op_type == "MaxPool3D") {
         if (tf_padding == "SAME") {
             // According to the formulas for calculating auto_pad values of the
             // Conv layer in the Operation specification,
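For the SAME branches above it helps to recall the arithmetic TensorFlow implies: the output size is ceil(input / stride), and the padding needed to reach it is split with the larger half at the end, which matches the SAME_UPPER convention used for Conv and MaxPool (the backprop-input ops map to SAME_LOWER instead). A standalone sketch of that standard formula, not code from this patch:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    struct Pads {
        int64_t begin;
        int64_t end;
    };

    // Standard TensorFlow SAME padding for one spatial dimension:
    // output size is ceil(input / stride); the total padding needed to reach it
    // is split with the larger half at the end (SAME_UPPER convention).
    Pads same_upper_padding(int64_t input, int64_t kernel, int64_t stride) {
        const int64_t output = (input + stride - 1) / stride;  // ceil division
        const int64_t total = std::max<int64_t>((output - 1) * stride + kernel - input, 0);
        return {total / 2, total - total / 2};
    }

    int main() {
        // input = 7, kernel = 3, stride = 2 -> output 4, total padding 2 -> {1, 1}
        Pads p = same_upper_padding(7, 3, 2);
        std::cout << "begin=" << p.begin << " end=" << p.end << '\n';
        // input = 10, kernel = 2, stride = 3 -> output 4, total padding 1 -> {0, 1}
        p = same_upper_padding(10, 2, 3);
        std::cout << "begin=" << p.begin << " end=" << p.end << '\n';
        return 0;
    }

With auto_pad set to SAME_UPPER or SAME_LOWER, OpenVINO derives these values itself, so the translator only fills pads_begin and pads_end for the EXPLICIT case.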
@@ -67,7 +69,7 @@ ov::op::PadType ov::frontend::tensorflow::convert_conv_tf_padding(const ov::fron
     return ov::op::PadType::EXPLICIT;
 }
 
-void fill_explicit_pads_vectors(const ov::frontend::tensorflow::NodeContext& node,
+void ov::frontend::tensorflow::fill_explicit_pads_vectors(const ov::frontend::tensorflow::NodeContext& node,
                                 bool is_nhwc,
                                 size_t spatial_dims_num,
                                 const std::vector<int64_t>& tf_explicit_paddings,
@@ -127,7 +129,7 @@ ov::OutputVector ov::frontend::tensorflow::translate_convolution_op(const ov::fr
     // retrieve attributes for Conv2D
     auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
    auto tf_padding_type = node.get_attribute<std::string>("padding");
-    ov::op::PadType auto_pad = convert_conv_tf_padding(node, tf_padding_type);
+    ov::op::PadType auto_pad = convert_tf_padding(node, tf_padding_type);
 
     // retrieve optional attributes
     auto tf_data_format = node.get_attribute<std::string>("data_format", spatial_dims_num == 2 ? "NHWC" : "NDHWC");
@@ -199,10 +199,16 @@ void make_const_op(const NodeContext& node, element::Type et, ov::Output<ov::Nod
     ng_node = std::make_shared<ov::opset8::Constant>(et, ng_shape, const_values);
 };
 
-ov::op::PadType convert_conv_tf_padding(const NodeContext& node, const std::string& tf_padding);
+ov::op::PadType convert_tf_padding(const NodeContext& node, const std::string& tf_padding);
 
 ov::OutputVector translate_convolution_op(const NodeContext& node, size_t spatial_dims_num);
 
+void fill_explicit_pads_vectors(const NodeContext& node,
+                                bool is_nhwc,
+                                size_t spatial_dims_num,
+                                const std::vector<int64_t>& tf_explicit_paddings,
+                                ov::CoordinateDiff& pads_begin,
+                                ov::CoordinateDiff& pads_end);
 } // namespace tensorflow
 } // namespace frontend
 } // namespace ov
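fill_explicit_pads_vectors is declared here but defined outside the lines shown. A sketch of the extraction it presumably performs, assuming TensorFlow's explicit_paddings attribute stores a [begin, end] pair per dimension in data-format order (illustrative code, not the frontend's implementation):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Illustrative only: pull the spatial [begin, end] pairs out of TensorFlow's
    // explicit_paddings attribute, which lists two values per dimension in the
    // same order as the data format (NHWC/NCHW and their 3D variants).
    void extract_spatial_pads(bool is_nhwc,
                              std::size_t spatial_dims_num,
                              const std::vector<int64_t>& tf_explicit_paddings,
                              std::vector<int64_t>& pads_begin,
                              std::vector<int64_t>& pads_end) {
        // spatial dims start right after batch (NHWC) or after batch and channel (NCHW)
        const std::size_t first_spatial = is_nhwc ? 1 : 2;
        pads_begin.clear();
        pads_end.clear();
        for (std::size_t i = 0; i < spatial_dims_num; ++i) {
            const std::size_t dim = first_spatial + i;
            pads_begin.push_back(tf_explicit_paddings[2 * dim]);
            pads_end.push_back(tf_explicit_paddings[2 * dim + 1]);
        }
    }

    int main() {
        // NHWC 2D example: pad H by {1, 2} and W by {3, 4}
        const std::vector<int64_t> pads = {0, 0, 1, 2, 3, 4, 0, 0};
        std::vector<int64_t> begin, end;
        extract_spatial_pads(true, 2, pads, begin, end);
        std::cout << begin[0] << ' ' << begin[1] << " / " << end[0] << ' ' << end[1] << '\n';  // 1 3 / 2 4
        return 0;
    }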