[TF FE] Refactor Pad, PadV2, MirrorPad and add layer tests (#13597)

* [TF FE] Refactor Pad, PadV2, MirrorPad and add layer tests

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Recover translators for DynamicStitch

* Apply code-review feedback and workaround for i32 paddings

* Return required type for convert_pad_mode function

* Work around IR reader limitation with i32 type for Pad

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
This commit is contained in:
Roman Kazantsev 2022-11-03 02:02:36 +03:00 committed by GitHub
parent 838a6503a0
commit fdee4ac703
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 145 additions and 60 deletions

View File

@ -3,71 +3,82 @@
//
#include "op_table.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/opsets/opset9.hpp"
using namespace std;
using namespace ov::opset8;
using namespace ov;
using namespace ov::opset9;
// 3 different Pad Ops: Pad, PadV2, MirrorPad
// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pad
// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pad-v2
// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/mirror-pad
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
static void slice_pads_begin_end(const Output<Node>& paddings,
shared_ptr<Node>& pads_begin,
shared_ptr<Node>& pads_end) {
// TODO: fix IR reader to accept padding of i32 type
auto paddings_i64 = make_shared<Convert>(paddings, element::i64);
auto axis = make_shared<Constant>(element::i32, Shape{}, 1);
auto index_zero = make_shared<Constant>(element::i32, Shape{1}, 0);
auto index_one = make_shared<Constant>(element::i32, Shape{1}, 1);
auto unsqueeze_pad_begin = make_shared<Gather>(paddings_i64, index_zero, axis);
auto unsqueeze_pad_end = make_shared<Gather>(paddings_i64, index_one, axis);
pads_begin = make_shared<Squeeze>(unsqueeze_pad_begin, axis);
pads_end = make_shared<Squeeze>(unsqueeze_pad_end, axis);
}
// Shared lowering for the Pad family in CONSTANT mode: slices the paddings
// input into begin/end vectors and emits a single OpenVINO Pad node named
// after the original TF node.
static OutputVector translate_pad_base_op(const NodeContext& node,
                                          const Output<Node>& input,
                                          const Output<Node>& paddings,
                                          const Output<Node>& constant_value) {
    // prepare pads_begin and pads_end for OpenVINO Pad
    shared_ptr<Node> pads_begin = nullptr;
    shared_ptr<Node> pads_end = nullptr;
    slice_pads_begin_end(paddings, pads_begin, pads_end);
    const auto pad_mode = ov::op::PadMode::CONSTANT;
    auto pad_node = make_shared<Pad>(input, pads_begin, pads_end, constant_value, pad_mode);
    set_node_name(node.get_name(), pad_node);
    return {pad_node};
}
// Translate TF Pad: pads the input with a zero of the input's element type.
// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pad
//
// NOTE(review): the diff rendering interleaved the removed pre-refactor
// translator (ng_input/ng_paddings_op/pad_val_op, op-type dispatch, mirror-pad
// mode handling) with the new one, leaving dead code after the return and uses
// of undeclared variables; this is the reconstructed post-refactor translator.
OutputVector translate_pad_op(const NodeContext& node) {
    default_op_checks(node, 2, {"Pad"});
    auto input = node.get_input(0);
    auto paddings = node.get_input(1);
    // Pad (unlike PadV2) has no pad-value input; it always pads with zero
    auto constant_value = make_shared<Constant>(input.get_element_type(), Shape{}, 0);
    return translate_pad_base_op(node, input, paddings, constant_value);
}
// Translate TF PadV2: like Pad but takes the pad value as a third input.
// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pad-v2
//
// NOTE(review): the diff rendering interleaved the removed constant-folding
// implementation (get_const_input into std::vector<int64_t>, redeclaring
// `paddings`, and building Pad directly) with the new translator; this is the
// reconstructed post-refactor translator, which keeps paddings dynamic.
OutputVector translate_padv2_op(const NodeContext& node) {
    default_op_checks(node, 3, {"PadV2"});
    auto input = node.get_input(0);
    auto paddings = node.get_input(1);
    // third input supplies the constant value used to fill the padded area
    auto constant_value = node.get_input(2);
    return translate_pad_base_op(node, input, paddings, constant_value);
}
// Translate TF MirrorPad: REFLECT/SYMMETRIC padding with no pad value input.
// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/mirror-pad
OutputVector translate_mirror_pad_op(const NodeContext& node) {
    default_op_checks(node, 2, {"MirrorPad"});
    auto input = node.get_input(0);
    auto paddings = node.get_input(1);
    // retrieve attributes: "mode" is REFLECT or SYMMETRIC, validated and
    // mapped onto ov::op::PadMode by convert_padding_mode
    auto mode = node.get_attribute<std::string>("mode");
    auto pad_mode = convert_padding_mode(node, mode);
    // prepare pads_begin and pads_end for OpenVINO Pad
    shared_ptr<Node> pads_begin, pads_end;
    slice_pads_begin_end(paddings, pads_begin, pads_end);
    // no pad value: Pad's 4-argument overload with a non-CONSTANT mode
    auto pad = make_shared<Pad>(input, pads_begin, pads_end, pad_mode);
    set_node_name(node.get_name(), pad);
    return {pad};
}
}  // namespace op
}  // namespace tensorflow
}  // namespace frontend
// NOTE(review): the diff residue duplicated the closing brace of namespace ov;
// only one is kept here.
}  // namespace ov

View File

@ -77,15 +77,17 @@ OP_CONVERTER(translate_lrn_op);
OP_CONVERTER(translate_mat_mul_op);
OP_CONVERTER(translate_matrix_diag_op);
OP_CONVERTER(translate_max_pool_op);
OP_CONVERTER(translate_mirror_pad_op);
OP_CONVERTER(translate_non_max_suppression_op);
OP_CONVERTER(translate_normalize_l2_op);
OP_CONVERTER(translate_pad_op);
OP_CONVERTER(translate_parallel_dynamic_stitch_op);
OP_CONVERTER(translate_placeholder_op);
OP_CONVERTER(translate_placeholder_with_default_op);
OP_CONVERTER(translate_no_op);
OP_CONVERTER(translate_one_hot_op);
OP_CONVERTER(translate_pack_op);
OP_CONVERTER(translate_pad_op);
OP_CONVERTER(translate_padv2_op);
OP_CONVERTER(translate_range_op);
OP_CONVERTER(translate_rank_op);
OP_CONVERTER(translate_random_uniform_op);
@ -253,7 +255,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"MaxPool", translate_max_pool_op},
{"MaxPoolV2", translate_max_pool_op},
{"MaxPool3D", translate_max_pool_op},
{"MirrorPad", translate_pad_op},
{"MirrorPad", translate_mirror_pad_op},
{"NonMaxSuppression", translate_non_max_suppression_op},
{"NonMaxSuppressionV2", translate_non_max_suppression_op},
{"NonMaxSuppressionV3", translate_non_max_suppression_op},
@ -264,7 +266,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"OneHot", translate_one_hot_op},
{"Pack", translate_pack_op},
{"Pad", translate_pad_op},
{"PadV2", translate_pad_op},
{"PadV2", translate_padv2_op},
{"DynamicStitch", translate_parallel_dynamic_stitch_op},
{"ParallelDynamicStitch", translate_parallel_dynamic_stitch_op},
{"Placeholder", translate_placeholder_op},

View File

@ -9,9 +9,11 @@
#include "openvino_conversions.hpp"
using namespace ov;
using namespace ov::op;
using namespace ov::opset10;
using namespace ov::opset8;
using namespace std;
using namespace ov::frontend::tensorflow;
void ov::frontend::tensorflow::set_node_name(const std::string& node_name, const std::shared_ptr<Node>& node) {
const auto& outputs = node->outputs();
@ -42,14 +44,15 @@ ov::op::PadType ov::frontend::tensorflow::convert_tf_padding(const ov::frontend:
"AvgPool",
"AvgPool3D"};
auto op_type = node.get_op_type();
TENSORFLOW_OP_VALIDATION(node,
supported_ops.count(op_type),
"Conversion of padding mode for " + op_type + " is not supported.");
TENSORFLOW_OP_VALIDATION(
node,
tf_padding == "VALID" || tf_padding == "SAME" || tf_padding == "EXPLICIT",
"The deconvolutional operation must have one of the padding type: VALID, SAME, and EXPLICIT.");
supported_ops.count(op_type),
"OpenVINO TensorFlow Frontend does not support conversion of padding type for " + op_type + " operation.");
std::set<std::string> supported_modes = {"VALID", "SAME", "EXPLICIT"};
TENSORFLOW_OP_VALIDATION(node,
supported_modes.count(tf_padding),
"OpenVINO TensorFlow Frontend does not support " + tf_padding + " padding mode.");
if (tf_padding == "VALID") {
return ov::op::PadType::VALID;
@ -216,6 +219,28 @@ ov::Output<ov::Node> ov::frontend::tensorflow::get_elements_number_1d(const ov::
return num_elements;
}
// Map a TensorFlow padding-mode attribute value onto the OpenVINO PadMode
// enum. Only MirrorPad carries such an attribute; any other op type, or any
// mode other than REFLECT/SYMMETRIC, fails validation.
PadMode ov::frontend::tensorflow::convert_padding_mode(const NodeContext& node, const std::string& padding_mode) {
    const std::set<std::string> ops_with_padding_mode = {"MirrorPad"};
    const auto op_type = node.get_op_type();
    TENSORFLOW_OP_VALIDATION(
        node,
        ops_with_padding_mode.count(op_type),
        "OpenVINO TensorFlow Frontend does not support conversion of padding mode for " + op_type + " operation.");

    const std::set<std::string> known_modes = {"REFLECT", "SYMMETRIC"};
    TENSORFLOW_OP_VALIDATION(node,
                             known_modes.count(padding_mode),
                             "OpenVINO TensorFlow Frontend does not support " + padding_mode + " padding mode.");

    // after validation only two modes are possible; the REFLECT fallback at
    // the end is unreachable but keeps all control paths returning a value
    if (padding_mode == "SYMMETRIC") {
        return PadMode::SYMMETRIC;
    }
    return PadMode::REFLECT;
}
Output<Node> ov::frontend::tensorflow::compute_subgraph_scalar_rank(const Output<Node>& output,
element::Type output_type,
bool as_scalar) {

View File

@ -52,6 +52,8 @@ ov::Output<Node> get_elements_number_1d(const Output<Node>& output,
ov::element::Type output_type,
ov::pass::NodeRegistry& rg);
ov::op::PadMode convert_padding_mode(const NodeContext& node, const std::string& padding_mode);
Output<Node> compute_subgraph_scalar_rank(const Output<Node>& output,
element::Type output_type,
bool as_scalar = false);

View File

@ -0,0 +1,45 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.tf_layer_test_class import CommonTFLayerTest
class TestPad(CommonTFLayerTest):
    def create_pad_net(self, input_shape, pads_values, const_value, pad_mode, pad_op):
        # Build a TF1 graph holding a single Pad / PadV2 / MirrorPad node
        # named 'pad' applied to a float32 placeholder named 'input'.
        import tensorflow as tf
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as session:
            paddings = tf.constant(pads_values, dtype=tf.int32)
            placeholder = tf.compat.v1.placeholder(tf.float32, input_shape, 'input')
            if pad_op == 'Pad':
                tf.raw_ops.Pad(input=placeholder, paddings=paddings, name='pad')
            elif pad_op == 'PadV2':
                # PadV2 additionally takes the constant fill value as an input
                constant_values = tf.constant(const_value, dtype=tf.float32)
                tf.raw_ops.PadV2(input=placeholder, paddings=paddings,
                                 constant_values=constant_values, name='pad')
            elif pad_op == 'MirrorPad':
                # pad_mode is 'REFLECT' or 'SYMMETRIC'
                tf.raw_ops.MirrorPad(input=placeholder, paddings=paddings, mode=pad_mode, name='pad')
            tf.compat.v1.global_variables_initializer()
            tf_net = session.graph_def
        # no manually built reference network — comparison is done end-to-end
        return tf_net, None

    test_data_basic = [
        dict(input_shape=[2, 3], pads_values=[[0, 1], [2, 3]], const_value=None, pad_mode=None, pad_op='Pad'),
        dict(input_shape=[2, 4, 3], pads_values=[[1, 2], [3, 4], [1, 1]], const_value=3, pad_mode=None, pad_op='PadV2'),
        dict(input_shape=[5, 6], pads_values=[[0, 1], [2, 3]], const_value=None, pad_mode='REFLECT',
             pad_op='MirrorPad'),
        dict(input_shape=[4, 6], pads_values=[[2, 1], [3, 1]], const_value=None, pad_mode='SYMMETRIC',
             pad_op='MirrorPad'),
    ]

    @pytest.mark.parametrize("params", test_data_basic)
    @pytest.mark.precommit_tf_fe
    @pytest.mark.nightly
    def test_pad_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
                       use_old_api):
        self._test(*self.create_pad_net(**params),
                   ie_device, precision, ir_version, temp_dir=temp_dir,
                   use_new_frontend=use_new_frontend, use_old_api=use_old_api)