[TF FE] Implement translators for TensorFlow ConvBackpropInput operations (#12356)

* [TF FE] Implement ConvBackpropInput translators

Now the translators support the dynamic input_sizes attribute and different padding modes
including EXPLICIT mode

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix clang-style issue

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix code-style issue

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix code-style issue

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Apply code-review feedback and fix build issues

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Apply code-review feedback: check for input size

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix retrieving explicit_padding attribute

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix code style

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
This commit is contained in:
Roman Kazantsev
2022-08-02 14:56:53 +03:00
committed by GitHub
parent 73689845a5
commit 2fa9b4d1f4
14 changed files with 217 additions and 144 deletions

View File

@@ -39,7 +39,7 @@ OutputVector translate_avg_pool_op(const NodeContext& node) {
convert_nhwc_to_hw(is_nhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_nhwc, ng_input.get_shape(), ng_image_shape);
convert_nhwc_to_hw(is_nhwc, tf_ksize, ng_kernel_shape);
convert_nhwc_to_nchw(node.get_name(), is_nhwc, ng_input);
convert_nhwc_to_nchw(is_nhwc, ng_input);
CoordinateDiff padding_below;
CoordinateDiff padding_above;
@@ -66,7 +66,7 @@ OutputVector translate_avg_pool_op(const NodeContext& node) {
ov::op::RoundingType::FLOOR);
auto res = res_node->output(0);
convert_nchw_to_nhwc(node.get_name(), is_nhwc, res);
convert_nchw_to_nhwc(is_nhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
}

View File

@@ -46,7 +46,7 @@ OutputVector translate_conv_2d_op(const NodeContext& node) {
convert_nhwc_to_hw(is_nhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_nhwc, ng_input.get_shape(), ng_image_shape);
convert_nhwc_to_hw(is_nhwc, tf_dilations, ng_dilations);
convert_nhwc_to_nchw(node.get_name(), is_nhwc, ng_input);
convert_nhwc_to_nchw(is_nhwc, ng_input);
auto& ng_filter_shape = ng_filter.get_shape();
ng_kernel_shape[0] = ng_filter_shape[0];
@@ -66,7 +66,7 @@ OutputVector translate_conv_2d_op(const NodeContext& node) {
Output<Node> res =
make_shared<Convolution>(ng_input, ng_filter, ng_strides, ng_padding_below, ng_padding_above, ng_dilations);
convert_nchw_to_nhwc(node.get_name(), is_nhwc, res);
convert_nchw_to_nhwc(is_nhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
}

View File

@@ -3,7 +3,9 @@
//
#include "op_table.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/opsets/opset8.hpp"
#include "utils.hpp"
using namespace std;
using namespace ov::opset8;
@@ -14,86 +16,102 @@ namespace tensorflow {
namespace op {
OutputVector translate_conv_2d_backprop_input_op(const NodeContext& node) {
auto ng_filter = node.get_input(1), ng_out_backprop = node.get_input(2);
TENSORFLOW_OP_VALIDATION(node, node.get_input_size() >= 3, "Conv2DBackpropInput must have at least three inputs.");
auto input_sizes = node.get_input(0);
auto filter = node.get_input(1);
auto out_backprop = node.get_input(2);
// retrieve attributes for Conv2DBackpropInput
auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
auto tf_padding_type = node.get_attribute<std::string>("padding");
ov::op::PadType auto_pad = convert_conv_tf_padding(node, tf_padding_type);
// retrieve optional attributes
auto tf_dilations = node.get_attribute<std::vector<int64_t>>("dilations", {1, 1, 1, 1});
auto tf_explicit_paddings = std::vector<int64_t>{};
if (auto_pad == ov::op::PadType::EXPLICIT) {
tf_explicit_paddings = node.get_attribute<std::vector<int64_t>>("explicit_paddings", {});
}
auto tf_data_format = node.get_attribute<std::string>("data_format", "NHWC");
TENSORFLOW_OP_VALIDATION(node,
tf_data_format == "NHWC" || tf_data_format == "NCHW",
"Conv2DBackpropInput data format is neither NHWC nor NCHW");
std::vector<int64_t> tf_input_sizes;
get_const_input(node, 0, &tf_input_sizes);
if (std::any_of(tf_input_sizes.begin(), tf_input_sizes.end(), [](int32_t size) {
return size <= 0;
})) {
FRONT_END_THROW("Conv2DBackpropInput input sizes must be positive integers");
if (auto_pad == ov::op::PadType::EXPLICIT) {
TENSORFLOW_OP_VALIDATION(node,
tf_explicit_paddings.size() == 8,
"Conv2DBackpropInput expects 8 padding values for EXPLICIT padding mode.");
}
bool is_nhwc = (tf_data_format == "NHWC");
Strides ng_strides(2);
Strides ng_dilations(2);
Shape ng_image_shape(2);
Shape ng_kernel_shape(2);
Shape ng_batch_shape(4);
// prepare attributes for OpenVINO ConvolutionBackpropData
Strides strides(2);
Strides dilations(2);
convert_nhwc_to_hw(is_nhwc, tf_strides, strides);
convert_nhwc_to_hw(is_nhwc, tf_dilations, dilations);
convert_nhwc_to_hw(is_nhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_nhwc, tf_dilations, ng_dilations);
convert_nhwc_to_hw(is_nhwc, tf_input_sizes, ng_image_shape);
convert_nhwc_to_nchw(node.get_name(), is_nhwc, ng_out_backprop);
if (is_nhwc) {
ng_batch_shape = {static_cast<unsigned long>(tf_input_sizes[0]),
static_cast<unsigned long>(tf_input_sizes[3]),
static_cast<unsigned long>(tf_input_sizes[1]),
static_cast<unsigned long>(tf_input_sizes[2])};
} else {
ng_batch_shape = {static_cast<unsigned long>(tf_input_sizes[0]),
static_cast<unsigned long>(tf_input_sizes[1]),
static_cast<unsigned long>(tf_input_sizes[2]),
static_cast<unsigned long>(tf_input_sizes[3])};
ov::CoordinateDiff pads_begin;
ov::CoordinateDiff pads_end;
if (auto_pad == ov::op::PadType::EXPLICIT) {
// prepare pads_begin and pads_end attributes for EXPLICIT padding mode
if (is_nhwc) {
// For NHWC layout, explicit paddings has the following form:
// [0, 0, pad_h1, pad_h2, pad_w1, pad_w2, 0, 0]
pads_begin.push_back(tf_explicit_paddings[2]);
pads_begin.push_back(tf_explicit_paddings[4]);
pads_end.push_back(tf_explicit_paddings[3]);
pads_end.push_back(tf_explicit_paddings[5]);
} else {
// For NCHW layout, explicit paddings has the following form:
// [0, 0, 0, 0, pad_h1, pad_h2, pad_w1, pad_w2]
pads_begin.push_back(tf_explicit_paddings[4]);
pads_begin.push_back(tf_explicit_paddings[6]);
pads_end.push_back(tf_explicit_paddings[5]);
pads_end.push_back(tf_explicit_paddings[7]);
}
}
auto& ng_filter_shape = ng_filter.get_shape();
ng_kernel_shape[0] = ng_filter_shape[0];
ng_kernel_shape[1] = ng_filter_shape[1];
ng_filter = make_transpose(ng_filter, {3, 2, 0, 1});
// prepare inputs to ConvolutionBackpropData
filter = make_transpose(filter, {3, 2, 0, 1});
convert_nhwc_to_nchw(is_nhwc, out_backprop);
CoordinateDiff ng_padding_below;
CoordinateDiff ng_padding_above;
make_padding(tf_padding_type,
ng_image_shape,
ng_kernel_shape,
ng_strides,
ng_dilations,
ng_padding_below,
ng_padding_above);
// initially think that output shape defined for NCHW layout
auto ss_begin = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{2});
auto ss_end = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{-1});
auto ss_strides = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{1});
auto ng_output_shape = make_shared<Constant>(element::i64,
Shape{ng_batch_shape.size() - 2},
vector<size_t>(ng_batch_shape.begin() + 2, ng_batch_shape.end()));
// change range of indices for spatial dimensions in case NHWC layout
if (is_nhwc) {
ss_begin = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{1});
ss_end = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{3});
}
auto res_node = make_shared<ConvolutionBackpropData>(ng_out_backprop,
ng_filter,
ng_output_shape,
ng_strides,
ng_padding_below,
ng_padding_above,
ng_dilations);
auto res = res_node->output(0);
auto spatial_shape = make_shared<StridedSlice>(input_sizes,
ss_begin,
ss_end,
ss_strides,
std::vector<int64_t>{},
std::vector<int64_t>{});
convert_nchw_to_nhwc(node.get_name(), is_nhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
auto conv_backprop = make_shared<ConvolutionBackpropData>(out_backprop,
filter,
spatial_shape,
strides,
pads_begin,
pads_end,
dilations,
auto_pad);
// insert Transpose only if original Conv2DBackpropInput is in NHWC layout
auto conv_backprop_output = conv_backprop->output(0);
convert_nchw_to_nhwc(is_nhwc, conv_backprop_output);
// move the original name to new ConvolutionBackpropData if original layout is NCHW
// move the original name to Transpose if original layout is NHWC
set_node_name(node.get_name(), conv_backprop_output.get_node_shared_ptr());
return {conv_backprop_output};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov
} // namespace ov

View File

@@ -48,7 +48,7 @@ OutputVector translate_conv_3d_op(const NodeContext& node) {
convert_nhwc_to_hw(is_ndhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_ndhwc, ng_input.get_shape(), ng_image_shape);
convert_nhwc_to_hw(is_ndhwc, tf_dilations, ng_dilations);
convert_nhwc_to_nchw(node.get_name(), is_ndhwc, ng_input);
convert_nhwc_to_nchw(is_ndhwc, ng_input);
auto& ng_filter_shape = ng_filter.get_shape();
ng_kernel_shape[0] = ng_filter_shape[0];
@@ -70,7 +70,7 @@ OutputVector translate_conv_3d_op(const NodeContext& node) {
make_shared<Convolution>(ng_input, ng_filter, ng_strides, ng_padding_below, ng_padding_above, ng_dilations);
auto res = res_node->output(0);
convert_nchw_to_nhwc(node.get_name(), is_ndhwc, res);
convert_nchw_to_nhwc(is_ndhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
}

View File

@@ -14,91 +14,104 @@ namespace tensorflow {
namespace op {
OutputVector translate_conv_3d_backprop_input_v2_op(const NodeContext& node) {
auto ng_filter = node.get_input(1);
auto ng_out_backprop = node.get_input(2);
TENSORFLOW_OP_VALIDATION(node, node.get_input_size() >= 3, "Conv3DBackpropInput must have at least three inputs.");
auto input_sizes = node.get_input(0);
auto filter = node.get_input(1);
auto out_backprop = node.get_input(2);
// retrieve attributes for Conv3DBackpropInputV2
// retrieve attributes for Conv3DBackpropInput
auto tf_strides = node.get_attribute<std::vector<int64_t>>("strides");
auto tf_padding_type = node.get_attribute<std::string>("padding");
ov::op::PadType auto_pad = convert_conv_tf_padding(node, tf_padding_type);
// retrieve optional attributes
auto tf_data_format = node.get_attribute<std::string>("data_format", "NDHWC");
auto tf_dilations = node.get_attribute<std::vector<int64_t>>("dilations", {1, 1, 1, 1, 1});
auto tf_explicit_paddings = std::vector<int64_t>{};
if (auto_pad == ov::op::PadType::EXPLICIT) {
tf_explicit_paddings = node.get_attribute<std::vector<int64_t>>("explicit_paddings", {});
}
auto tf_data_format = node.get_attribute<std::string>("data_format", "NDHWC");
TENSORFLOW_OP_VALIDATION(node,
tf_data_format == "NDHWC" || tf_data_format == "NCDHW",
"Conv3DBackpropInputV2 data format is neither NDHWC nor NCDHW. "
"Provided data format: ",
tf_data_format);
"Conv3DBackpropInput data format is neither NDHWC nor NCDHW");
if (auto_pad == ov::op::PadType::EXPLICIT) {
TENSORFLOW_OP_VALIDATION(node,
tf_explicit_paddings.size() == 10,
"Conv3DBackpropInput expects 10 padding values for EXPLICIT padding mode.");
}
bool is_nhwc = (tf_data_format == "NDHWC");
std::vector<int64_t> tf_input_sizes;
get_const_input(node, 0, &tf_input_sizes);
// prepare attributes for OpenVINO ConvolutionBackpropData
Strides strides(3);
Strides dilations(3);
convert_nhwc_to_hw(is_nhwc, tf_strides, strides);
convert_nhwc_to_hw(is_nhwc, tf_dilations, dilations);
if (std::any_of(tf_input_sizes.begin(), tf_input_sizes.end(), [](int32_t size) {
return size <= 0;
})) {
FRONT_END_THROW("Conv3DBackpropInputV2 input sizes must be positive integers");
ov::CoordinateDiff pads_begin;
ov::CoordinateDiff pads_end;
if (auto_pad == ov::op::PadType::EXPLICIT) {
// prepare pads_begin and pads_end attributes for EXPLICIT padding mode
if (is_nhwc) {
// For NDHWC layout, explicit paddings has the following form:
// [0, 0, pad_d1, pad_d2, pad_h1, pad_h2, pad_w1, pad_w2, 0, 0]
pads_begin.push_back(tf_explicit_paddings[2]);
pads_begin.push_back(tf_explicit_paddings[4]);
pads_begin.push_back(tf_explicit_paddings[6]);
pads_end.push_back(tf_explicit_paddings[3]);
pads_end.push_back(tf_explicit_paddings[5]);
pads_end.push_back(tf_explicit_paddings[7]);
} else {
// For NCDHW layout, explicit paddings has the following form:
// [0, 0, 0, 0, pad_d1, pad_d2, pad_h1, pad_h2, pad_w1, pad_w2]
pads_begin.push_back(tf_explicit_paddings[4]);
pads_begin.push_back(tf_explicit_paddings[6]);
pads_begin.push_back(tf_explicit_paddings[8]);
pads_end.push_back(tf_explicit_paddings[5]);
pads_end.push_back(tf_explicit_paddings[7]);
pads_end.push_back(tf_explicit_paddings[9]);
}
}
bool is_ndhwc = (tf_data_format == "NDHWC");
// prepare inputs to ConvolutionBackpropData
filter = make_transpose(filter, {4, 3, 0, 1, 2});
convert_nhwc_to_nchw(is_nhwc, out_backprop);
ov::Strides ng_strides(3);
ov::Strides ng_dilations(3);
ov::Shape ng_image_shape(3);
ov::Shape ng_kernel_shape(3);
ov::Shape ng_batch_shape(5);
// initially think that output shape defined for NCDHW layout
auto ss_begin = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{2});
auto ss_end = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{-1});
auto ss_strides = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{1});
convert_nhwc_to_hw(is_ndhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_ndhwc, tf_dilations, ng_dilations);
convert_nhwc_to_hw(is_ndhwc, tf_input_sizes, ng_image_shape);
convert_nhwc_to_nchw(node.get_name(), is_ndhwc, ng_out_backprop);
if (is_ndhwc) {
ng_batch_shape = {static_cast<unsigned long>(tf_input_sizes[0]),
static_cast<unsigned long>(tf_input_sizes[4]),
static_cast<unsigned long>(tf_input_sizes[1]),
static_cast<unsigned long>(tf_input_sizes[2]),
static_cast<unsigned long>(tf_input_sizes[3])};
} else {
ng_batch_shape = {static_cast<unsigned long>(tf_input_sizes[0]),
static_cast<unsigned long>(tf_input_sizes[1]),
static_cast<unsigned long>(tf_input_sizes[2]),
static_cast<unsigned long>(tf_input_sizes[3]),
static_cast<unsigned long>(tf_input_sizes[4])};
// change range of indices for spatial dimensions in case NDHWC layout
if (is_nhwc) {
ss_begin = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{1});
ss_end = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{4});
}
auto& ng_filter_shape = ng_filter.get_shape();
ng_kernel_shape[0] = ng_filter_shape[0];
ng_kernel_shape[1] = ng_filter_shape[1];
ng_kernel_shape[2] = ng_filter_shape[2];
ng_filter = make_transpose(ng_filter, {4, 3, 0, 1, 2});
auto spatial_shape = make_shared<StridedSlice>(input_sizes,
ss_begin,
ss_end,
ss_strides,
std::vector<int64_t>{},
std::vector<int64_t>{});
ov::CoordinateDiff ng_padding_below;
ov::CoordinateDiff ng_padding_above;
auto conv_backprop = make_shared<ConvolutionBackpropData>(out_backprop,
filter,
spatial_shape,
strides,
pads_begin,
pads_end,
dilations,
auto_pad);
make_padding(tf_padding_type,
ng_image_shape,
ng_kernel_shape,
ng_strides,
ng_dilations,
ng_padding_below,
ng_padding_above);
// insert Transpose only if original Conv3DBackpropInput is in NDHWC layout
auto conv_backprop_output = conv_backprop->output(0);
convert_nchw_to_nhwc(is_nhwc, conv_backprop_output);
auto ng_output_shape = make_shared<Constant>(element::i64,
Shape{ng_batch_shape.size() - 2},
vector<size_t>(ng_batch_shape.begin() + 2, ng_batch_shape.end()));
auto res_node = make_shared<ConvolutionBackpropData>(ng_out_backprop,
ng_filter,
ng_output_shape,
ng_strides,
ng_padding_below,
ng_padding_above,
ng_dilations);
auto res = res_node->output(0);
convert_nchw_to_nhwc(node.get_name(), is_ndhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
// move the original name to new ConvolutionBackpropData if original layout is NCDHW
// move the original name to Transpose if original layout is NDHWC
set_node_name(node.get_name(), conv_backprop_output.get_node_shared_ptr());
return {conv_backprop_output};
}
} // namespace op
} // namespace tensorflow

View File

@@ -27,10 +27,10 @@ OutputVector translate_depth_to_space_op(const NodeContext& node) {
bool is_nhwc = (tf_data_format == "NHWC");
convert_nhwc_to_nchw(node.get_name(), is_nhwc, ng_input);
convert_nhwc_to_nchw(is_nhwc, ng_input);
auto ng_mode = DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST;
Output<Node> res = make_shared<DepthToSpace>(ng_input, ng_mode, block_size)->output(0);
convert_nchw_to_nhwc(node.get_name(), is_nhwc, res);
convert_nchw_to_nhwc(is_nhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
}

View File

@@ -36,7 +36,7 @@ OutputVector translate_depthwise_conv_2d_native_op(const NodeContext& node) {
convert_nhwc_to_hw(is_nhwc, ng_input.get_shape(), ng_image_shape);
convert_nhwc_to_hw(is_nhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_nhwc, tf_dilations, ng_dilations);
convert_nhwc_to_nchw(node.get_name(), is_nhwc, ng_input);
convert_nhwc_to_nchw(is_nhwc, ng_input);
auto& ng_filter_shape = ng_filter.get_shape();
ng_kernel_shape[0] = ng_filter_shape[0];
@@ -71,7 +71,7 @@ OutputVector translate_depthwise_conv_2d_native_op(const NodeContext& node) {
ng_dilations);
auto ng_conv = ng_conv_node->output(0);
convert_nchw_to_nhwc(node.get_name(), is_nhwc, ng_conv);
convert_nchw_to_nhwc(is_nhwc, ng_conv);
set_node_name(node.get_name(), ng_conv.get_node_shared_ptr());
return {ng_conv};
}

View File

@@ -34,11 +34,11 @@ OutputVector translate_fused_batch_norm_op(const NodeContext& node) {
OPENVINO_DEBUG << "epsilon: " << tf_epsilon;
convert_nhwc_to_nchw(node.get_name(), is_nhwc, ng_input);
convert_nhwc_to_nchw(is_nhwc, ng_input);
auto ng_batch_norm =
make_shared<BatchNormInference>(ng_input, ng_scale, ng_offset, ng_mean, ng_variance, tf_epsilon)->output(0);
convert_nchw_to_nhwc(node.get_name(), is_nhwc, ng_batch_norm);
convert_nchw_to_nhwc(is_nhwc, ng_batch_norm);
// TODO: Why are there so many? Is it correct?
OutputVector result = {ng_batch_norm, ng_mean, ng_variance, ng_mean, ng_variance};

View File

@@ -36,7 +36,7 @@ OutputVector translate_max_pool_op(const NodeContext& node) {
convert_nhwc_to_hw(is_nhwc, tf_strides, ng_strides);
convert_nhwc_to_hw(is_nhwc, ng_input.get_shape(), ng_image_shape);
convert_nhwc_to_hw(is_nhwc, tf_ksize, ng_kernel_shape);
convert_nhwc_to_nchw(node.get_name(), is_nhwc, ng_input);
convert_nhwc_to_nchw(is_nhwc, ng_input);
CoordinateDiff padding_below;
CoordinateDiff padding_above;
@@ -61,7 +61,7 @@ OutputVector translate_max_pool_op(const NodeContext& node) {
ov::op::RoundingType::FLOOR);
auto res = res_node->output(0);
convert_nchw_to_nhwc(node.get_name(), is_nhwc, res);
convert_nchw_to_nhwc(is_nhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
}

View File

@@ -22,10 +22,10 @@ OutputVector translate_space_to_depth_op(const NodeContext& node) {
TENSORFLOW_OP_VALIDATION(node, data_format == "NHWC" || data_format == "NCHW", "Unsupported data format.");
bool is_nhwc = (data_format == "NHWC");
convert_nhwc_to_nchw(node.get_name(), is_nhwc, input);
convert_nhwc_to_nchw(is_nhwc, input);
auto ng_mode = SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
auto res = make_shared<SpaceToDepth>(input, ng_mode, block_size)->output(0);
convert_nchw_to_nhwc(node.get_name(), is_nhwc, res);
convert_nchw_to_nhwc(is_nhwc, res);
set_node_name(node.get_name(), res.get_node_shared_ptr());
return {res};
}

View File

@@ -10,9 +10,11 @@ namespace ov {
namespace frontend {
namespace tensorflow {
void convert_nhwc_to_nchw(const std::string& op_name, bool need_convert, ov::Output<ov::Node>& node) {
void convert_nhwc_to_nchw(bool need_convert, ov::Output<ov::Node>& node) {
if (need_convert) {
auto rank = node.get_shape().size();
OPENVINO_ASSERT(node.get_partial_shape().rank().is_static(),
"The input rank must be static to convert to the first channel format.");
auto rank = node.get_partial_shape().rank().get_length();
if (rank == 4) {
node = make_transpose(node, {0, 3, 1, 2});
} else if (rank == 5) {
@@ -21,9 +23,11 @@ void convert_nhwc_to_nchw(const std::string& op_name, bool need_convert, ov::Out
}
}
void convert_nchw_to_nhwc(const std::string& op_name, bool need_convert, ov::Output<ov::Node>& node) {
void convert_nchw_to_nhwc(bool need_convert, ov::Output<ov::Node>& node) {
if (need_convert) {
auto rank = node.get_shape().size();
OPENVINO_ASSERT(node.get_partial_shape().rank().is_static(),
"The input rank must be static to convert to the last channel format.");
auto rank = node.get_partial_shape().rank().get_length();
if (rank == 4) {
node = make_transpose(node, {0, 2, 3, 1});
} else if (rank == 5) {

View File

@@ -43,9 +43,9 @@ void convert_nchw_to_hw(const std::vector<T>& src, std::vector<size_t>& dst) {
}
} // namespace detail
void convert_nhwc_to_nchw(const std::string& op_name, bool need_convert, ov::Output<ov::Node>& ng_input);
void convert_nhwc_to_nchw(bool need_convert, ov::Output<ov::Node>& node);
void convert_nchw_to_nhwc(const std::string& op_name, bool need_convert, ov::Output<ov::Node>& ng_node);
void convert_nchw_to_nhwc(bool need_convert, ov::Output<ov::Node>& node);
template <typename T>
void convert_nhwc_to_hw(bool is_nhwc, const std::vector<T>& src, std::vector<size_t>& dst) {

View File

@@ -27,3 +27,38 @@ void ov::frontend::tensorflow::set_node_name(const std::string& node_name, const
void ov::frontend::tensorflow::set_out_name(const std::string& out_name, const ov::Output<ov::Node>& output) {
output.get_tensor().add_names({out_name});
}
// Converts a TensorFlow padding attribute ("VALID", "SAME" or "EXPLICIT") into
// the corresponding OpenVINO PadType for convolutional operations.
//
// @param node        The TensorFlow node being translated; must be one of
//                    Conv2D, Conv2DBackpropInput, Conv3D, Conv3DBackpropInputV2.
// @param tf_padding  The value of the node's "padding" attribute.
// @return ov::op::PadType::VALID for "VALID"; for "SAME", SAME_LOWER for the
//         backprop-input (deconvolution) operations and SAME_UPPER for the
//         forward convolutions (per the auto_pad formulas in the OpenVINO
//         operation specification); PadType::EXPLICIT for "EXPLICIT".
// @throws via TENSORFLOW_OP_VALIDATION if the operation type or padding value
//         is unsupported.
ov::op::PadType ov::frontend::tensorflow::convert_conv_tf_padding(const ov::frontend::tensorflow::NodeContext& node,
                                                                  const std::string& tf_padding) {
    auto op_type = node.get_op_type();

    TENSORFLOW_OP_VALIDATION(node,
                             op_type == "Conv2D" || op_type == "Conv2DBackpropInput" || op_type == "Conv3D" ||
                                 op_type == "Conv3DBackpropInputV2",
                             "The convert_conv_tf_padding routine supports only convolutional operations.");
    TENSORFLOW_OP_VALIDATION(
        node,
        tf_padding == "VALID" || tf_padding == "SAME" || tf_padding == "EXPLICIT",
        "The convolutional operation must have one of the padding type: VALID, SAME, and EXPLICIT.");

    if (tf_padding == "VALID") {
        return ov::op::PadType::VALID;
    }
    if (tf_padding == "SAME") {
        // Reuse op_type computed above instead of querying the node again.
        if (op_type == "Conv2DBackpropInput" || op_type == "Conv3DBackpropInputV2") {
            // According to the formulas for calculating auto_pad values of the
            // ConvolutionBackpropData layer in the Operation specification,
            // the SAME_LOWER value matches the SAME value in TensorFlow.
            return ov::op::PadType::SAME_LOWER;
        }
        // For forward convolutions (Conv2D, Conv3D) the SAME_UPPER value
        // matches the SAME value in TensorFlow.
        return ov::op::PadType::SAME_UPPER;
    }
    // tf_padding == "EXPLICIT": pads are provided via the explicit_paddings attribute.
    return ov::op::PadType::EXPLICIT;
}

View File

@@ -196,6 +196,9 @@ void make_const_op(const NodeContext& node, element::Type et, ov::Output<ov::Nod
values_from_const_node<T, VecT>(node, &ng_shape, &const_values);
ng_node = std::make_shared<ov::opset8::Constant>(et, ng_shape, const_values);
};
ov::op::PadType convert_conv_tf_padding(const NodeContext& node, const std::string& tf_padding);
} // namespace tensorflow
} // namespace frontend
} // namespace ov