[TF FE] Refactor translators for Conv2d and Conv3d (#12444)

It allows converting a CNN-Transformer model. Padding was previously computed incorrectly.

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
This commit is contained in:
Roman Kazantsev 2022-08-08 11:29:31 +03:00 committed by GitHub
parent b3341defce
commit 76e78c3400
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 126 additions and 113 deletions

View File

@ -4,6 +4,7 @@
#include "op_table.hpp" #include "op_table.hpp"
#include "openvino/opsets/opset8.hpp" #include "openvino/opsets/opset8.hpp"
#include "utils.hpp"
using namespace std; using namespace std;
using namespace ov::opset8; using namespace ov::opset8;
@ -14,61 +15,7 @@ namespace tensorflow {
namespace op { namespace op {
OutputVector translate_conv_2d_op(const NodeContext& node) { OutputVector translate_conv_2d_op(const NodeContext& node) {
auto ng_input = node.get_input(0), ng_filter = node.get_input(1); return translate_convolution_op(node, 2);
// retrieve attributes for Conv2D
auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
auto tf_padding_type = node.get_attribute<std::string>("padding");
// retrieve optional attributes
auto tf_data_format = node.get_attribute<std::string>("data_format", "NHWC");
auto tf_dilations = node.get_attribute<std::vector<int64_t>>("dilations", {1, 1, 1, 1});
TENSORFLOW_OP_VALIDATION(node,
tf_data_format == "NHWC" || tf_data_format == "NCHW",
"Conv2D data format is neither NHWC nor NCHW");
bool is_nhwc = (tf_data_format == "NHWC");
// TF Kernel Test Checks
// Strides in the batch and depth dimension is not supported
if (tf_strides[0] != 1 || tf_strides[is_nhwc ? 3 : 1] != 1) {
TENSORFLOW_OP_VALIDATION(node,
false,
"Strides in batch and depth dimensions is not supported: " + node.get_op_type());
}
Strides ng_strides(2);
Strides ng_dilations(2);
Shape ng_image_shape(2);
Shape ng_kernel_shape(2);
convert_nhwc_to_hw(is_nhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_nhwc, ng_input.get_shape(), ng_image_shape);
convert_nhwc_to_hw(is_nhwc, tf_dilations, ng_dilations);
convert_nhwc_to_nchw(is_nhwc, ng_input);
auto& ng_filter_shape = ng_filter.get_shape();
ng_kernel_shape[0] = ng_filter_shape[0];
ng_kernel_shape[1] = ng_filter_shape[1];
ng_filter = make_transpose(ng_filter, {3, 2, 0, 1});
CoordinateDiff ng_padding_below;
CoordinateDiff ng_padding_above;
make_padding(tf_padding_type,
ng_image_shape,
ng_kernel_shape,
ng_strides,
ng_dilations,
ng_padding_below,
ng_padding_above);
Output<Node> res =
make_shared<Convolution>(ng_input, ng_filter, ng_strides, ng_padding_below, ng_padding_above, ng_dilations);
convert_nchw_to_nhwc(is_nhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
} }
} // namespace op } // namespace op
} // namespace tensorflow } // namespace tensorflow

View File

@ -15,64 +15,7 @@ namespace tensorflow {
namespace op { namespace op {
OutputVector translate_conv_3d_op(const NodeContext& node) { OutputVector translate_conv_3d_op(const NodeContext& node) {
auto ng_input = node.get_input(0), ng_filter = node.get_input(1); return translate_convolution_op(node, 3);
// retrieve attributes for Conv3D
auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
auto tf_padding_type = node.get_attribute<std::string>("padding");
// retrieve optional attributes
auto tf_data_format = node.get_attribute<std::string>("data_format", "NDHWC");
auto tf_dilations = node.get_attribute<std::vector<int64_t>>("dilations", {1, 1, 1, 1, 1});
TENSORFLOW_OP_VALIDATION(node,
tf_data_format == "NDHWC" || tf_data_format == "NCDHW",
"Conv3D data format is neither NDHWC nor NCDHW");
bool is_ndhwc = (tf_data_format == "NDHWC");
// TODO: in 3D
// TF Kernel Test Checks
// // Strides in the batch and depth dimension is not supported
// if (tf_strides[0] != 1 || tf_strides[is_nhwc ? 3 : 1] != 1) {
// return errors::InvalidArgument(
// "Strides in batch and depth dimensions is not supported: ",
// op->type_string());
// }
Strides ng_strides(3);
Strides ng_dilations(3);
Shape ng_image_shape(3);
Shape ng_kernel_shape(3);
convert_nhwc_to_hw(is_ndhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_ndhwc, ng_input.get_shape(), ng_image_shape);
convert_nhwc_to_hw(is_ndhwc, tf_dilations, ng_dilations);
convert_nhwc_to_nchw(is_ndhwc, ng_input);
auto& ng_filter_shape = ng_filter.get_shape();
ng_kernel_shape[0] = ng_filter_shape[0];
ng_kernel_shape[1] = ng_filter_shape[1];
ng_kernel_shape[2] = ng_filter_shape[2];
ng_filter = make_transpose(ng_filter, {4, 3, 0, 1, 2});
CoordinateDiff ng_padding_below;
CoordinateDiff ng_padding_above;
make_padding(tf_padding_type,
ng_image_shape,
ng_kernel_shape,
ng_strides,
ng_dilations,
ng_padding_below,
ng_padding_above);
auto res_node =
make_shared<Convolution>(ng_input, ng_filter, ng_strides, ng_padding_below, ng_padding_above, ng_dilations);
auto res = res_node->output(0);
convert_nchw_to_nhwc(is_ndhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
} }
} // namespace op } // namespace op
} // namespace tensorflow } // namespace tensorflow

View File

@ -4,6 +4,10 @@
#include "utils.hpp" #include "utils.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace ov::opset8;
void ov::frontend::tensorflow::tf_shape_to_ov_shape(const ::tensorflow::TensorShapeProto& tf_shape, void ov::frontend::tensorflow::tf_shape_to_ov_shape(const ::tensorflow::TensorShapeProto& tf_shape,
ov::PartialShape* ng_shape) { ov::PartialShape* ng_shape) {
std::vector<ov::Dimension> dims; std::vector<ov::Dimension> dims;
@ -62,3 +66,120 @@ ov::op::PadType ov::frontend::tensorflow::convert_conv_tf_padding(const ov::fron
return ov::op::PadType::EXPLICIT; return ov::op::PadType::EXPLICIT;
} }
// Translates the TensorFlow `explicit_paddings` attribute into OpenVINO
// pads_begin / pads_end vectors for Convolution (EXPLICIT padding mode).
//
// TF stores explicit paddings as [before, after] pairs for every dimension of
// the input tensor, batch and channel included, so there are
// 2 * (spatial_dims_num + 2) values. For both 2D and 3D cases the spatial
// pairs start at index 2 in channels-last layout (NHWC/NDHWC) and at index 4
// in channels-first layout (NCHW/NCDHW); batch/channel pads must be zero.
//
// node                 - frontend node, used only for validation/error reporting
// is_nhwc              - true when the data layout is channels-last
// spatial_dims_num     - number of spatial dimensions (2 or 3)
// tf_explicit_paddings - raw TF padding values as described above
// pads_begin/pads_end  - outputs; one entry appended per spatial dimension
void fill_explicit_pads_vectors(const ov::frontend::tensorflow::NodeContext& node,
                                bool is_nhwc,
                                size_t spatial_dims_num,
                                const std::vector<int64_t>& tf_explicit_paddings,
                                ov::CoordinateDiff& pads_begin,
                                ov::CoordinateDiff& pads_end) {
    // validate the attribute length up front, keeping per-rank error messages
    if (spatial_dims_num == 2) {
        TENSORFLOW_OP_VALIDATION(node,
                                 tf_explicit_paddings.size() == 8,
                                 "Conv2D expects 8 padding values for EXPLICIT padding mode.");
    } else {
        TENSORFLOW_OP_VALIDATION(node,
                                 tf_explicit_paddings.size() == 10,
                                 "Conv3D expects 10 padding values for EXPLICIT padding mode.");
    }

    // Channels-last:  [0, 0, <spatial pairs...>, 0, 0] -> pairs start at index 2
    // Channels-first: [0, 0, 0, 0, <spatial pairs...>] -> pairs start at index 4
    // Each pair is (pad_begin, pad_end), hence the even/odd split below.
    const size_t offset = is_nhwc ? 2 : 4;
    for (size_t dim = 0; dim < spatial_dims_num; ++dim) {
        pads_begin.push_back(tf_explicit_paddings[offset + 2 * dim]);
        pads_end.push_back(tf_explicit_paddings[offset + 2 * dim + 1]);
    }
}
// Shared translator for TF Conv2D (spatial_dims_num == 2) and Conv3D
// (spatial_dims_num == 3). Converts the input to channels-first layout,
// transposes the TF filter to OpenVINO's O,I,spatial order, builds an opset8
// Convolution, and converts the result back to the original layout.
// Returns a single-output vector holding the (possibly transposed) result.
ov::OutputVector ov::frontend::tensorflow::translate_convolution_op(const ov::frontend::tensorflow::NodeContext& node,
                                                                    size_t spatial_dims_num) {
    TENSORFLOW_OP_VALIDATION(node,
                             spatial_dims_num == 2 || spatial_dims_num == 3,
                             "Conv2D or Conv3D are supported only.");
    TENSORFLOW_OP_VALIDATION(node, node.get_input_size() >= 2, "Convolution must have at least two inputs.");
    auto input = node.get_input(0);
    auto filter = node.get_input(1);

    // retrieve mandatory attributes common to Conv2D and Conv3D
    auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
    auto tf_padding_type = node.get_attribute<std::string>("padding");
    ov::op::PadType auto_pad = convert_conv_tf_padding(node, tf_padding_type);

    // retrieve optional attributes; the data_format default depends on rank
    auto tf_data_format = node.get_attribute<std::string>("data_format", spatial_dims_num == 2 ? "NHWC" : "NDHWC");
    auto tf_explicit_paddings = std::vector<int64_t>{};
    if (auto_pad == ov::op::PadType::EXPLICIT) {
        // per-dimension pads are only meaningful in EXPLICIT padding mode
        tf_explicit_paddings = node.get_attribute<std::vector<int64_t>>("explicit_paddings", {});
    }
    std::vector<int64_t> dilation_2d = {1, 1, 1, 1};
    std::vector<int64_t> dilation_3d = {1, 1, 1, 1, 1};
    auto tf_dilations =
        node.get_attribute<std::vector<int64_t>>("dilations", spatial_dims_num == 2 ? dilation_2d : dilation_3d);

    // determine whether the layout is channels-last for the given rank
    bool is_nhwc = true;
    if (spatial_dims_num == 2) {
        TENSORFLOW_OP_VALIDATION(node,
                                 tf_data_format == "NHWC" || tf_data_format == "NCHW",
                                 "Conv2D data format is neither NHWC nor NCHW");
        is_nhwc = (tf_data_format == "NHWC");
    } else {
        TENSORFLOW_OP_VALIDATION(node,
                                 tf_data_format == "NDHWC" || tf_data_format == "NCDHW",
                                 "Conv3D data format is neither NDHWC nor NCDHW");
        is_nhwc = (tf_data_format == "NDHWC");
    }

    // prepare attributes for OpenVINO Convolution: extract the spatial parts
    // of strides/dilations (dropping batch and channel entries)
    ov::Strides strides(spatial_dims_num);
    ov::Strides dilations(spatial_dims_num);
    ov::frontend::tensorflow::convert_nhwc_to_hw(is_nhwc, tf_strides, strides);
    ov::frontend::tensorflow::convert_nhwc_to_hw(is_nhwc, tf_dilations, dilations);

    // pads_begin/pads_end stay empty unless padding mode is EXPLICIT;
    // for SAME/VALID the auto_pad argument of Convolution handles padding
    ov::CoordinateDiff pads_begin;
    ov::CoordinateDiff pads_end;
    if (auto_pad == ov::op::PadType::EXPLICIT) {
        fill_explicit_pads_vectors(node, is_nhwc, spatial_dims_num, tf_explicit_paddings, pads_begin, pads_end);
    }

    // prepare inputs to Convolution
    // NOTE: convert_nhwc_to_nchw mutates `input` in place (taken by reference)
    ov::frontend::tensorflow::convert_nhwc_to_nchw(is_nhwc, input);
    // TF filters are spatial-first with I before O; permute to O,I,spatial
    ov::AxisVector permutation_2d = {3, 2, 0, 1};
    ov::AxisVector permutation_3d = {4, 3, 0, 1, 2};
    filter = ov::frontend::tensorflow::make_transpose(filter, spatial_dims_num == 2 ? permutation_2d : permutation_3d);

    ov::Output<ov::Node> conv =
        std::make_shared<Convolution>(input, filter, strides, pads_begin, pads_end, dilations, auto_pad);

    // restore the caller-visible layout and tag the node with the TF name
    ov::frontend::tensorflow::convert_nchw_to_nhwc(is_nhwc, conv);
    ov::frontend::tensorflow::set_node_name(node.get_name(), conv.get_node_shared_ptr());
    return {conv};
}

View File

@ -199,6 +199,8 @@ void make_const_op(const NodeContext& node, element::Type et, ov::Output<ov::Nod
ov::op::PadType convert_conv_tf_padding(const NodeContext& node, const std::string& tf_padding); ov::op::PadType convert_conv_tf_padding(const NodeContext& node, const std::string& tf_padding);
ov::OutputVector translate_convolution_op(const NodeContext& node, size_t spatial_dims_num);
} // namespace tensorflow } // namespace tensorflow
} // namespace frontend } // namespace frontend
} // namespace ov } // namespace ov