[TF FE] Support ClipByValue operation (#15246)

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

Roman Kazantsev 2023-01-23 10:41:06 +04:00 committed by GitHub
parent d4cb719213
commit 5c10e3741e
3 changed files with 85 additions and 0 deletions


@@ -0,0 +1,34 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op_table.hpp"
#include "openvino/opsets/opset10.hpp"
using namespace std;
using namespace ov::opset10;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
OutputVector translate_clip_by_value_op(const NodeContext& node) {
default_op_checks(node, 3, {"ClipByValue"});
auto t = node.get_input(0);
auto clip_value_min = node.get_input(1);
auto clip_value_max = node.get_input(2);
// it can be case that clip_value_min > clip_value_max
// in this case both values are equal to clip_value_min
clip_value_max = make_shared<Maximum>(clip_value_min, clip_value_max);
auto clip_by_min = make_shared<Maximum>(t, clip_value_min);
auto clip_by_max = make_shared<Minimum>(clip_by_min, clip_value_max);
set_node_name(node.get_name(), clip_by_max);
return {clip_by_max};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov
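
A minimal NumPy sketch of the decomposition used above, for illustration only (not part of the commit; the helper name clip_by_value_reference is hypothetical):

    import numpy as np

    def clip_by_value_reference(t, clip_value_min, clip_value_max):
        # align clip_value_max so that clip_value_min > clip_value_max degenerates
        # to an all-clip_value_min result, mirroring the Maximum/Minimum graph above
        clip_value_max = np.maximum(clip_value_min, clip_value_max)
        return np.minimum(np.maximum(t, clip_value_min), clip_value_max)

    # clip_by_value_reference(np.array([-5., 0., 5.]), 1., 3.)  -> [1., 1., 3.]
    # clip_by_value_reference(np.array([-5., 0., 5.]), 4., 2.)  -> [4., 4., 4.]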


@@ -37,6 +37,7 @@ OP_CONVERTER(translate_broadcast_args_op);
OP_CONVERTER(translate_broadcast_to_op);
OP_CONVERTER(translate_bucketize_op);
OP_CONVERTER(translate_cast_op);
OP_CONVERTER(translate_clip_by_value_op);
OP_CONVERTER(translate_concat_op);
OP_CONVERTER(translate_const_op);
OP_CONVERTER(translate_conv_2d_op);
@@ -221,6 +222,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"Bucketize", translate_bucketize_op},
{"BiasAdd", translate_bias_add_op},
{"Cast", translate_cast_op},
{"ClipByValue", translate_clip_by_value_op},
{"Concat", translate_concat_op},
{"ConcatV2", translate_concat_op},
{"Const", translate_const_op},


@@ -0,0 +1,49 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import pytest
import tensorflow as tf
from common.tf_layer_test_class import CommonTFLayerTest


class TestClipByValue(CommonTFLayerTest):
    def _prepare_input(self, inputs_info):
        assert 't' in inputs_info, "Test error: inputs_info must contain `t`"
        t_shape = inputs_info['t']
        clip_value_min_shape = inputs_info['clip_value_min']
        clip_value_max_shape = inputs_info['clip_value_max']
        inputs_data = {}
        inputs_data['t'] = np.random.randint(-10, 10, t_shape).astype(np.float32)
        # the random ranges overlap, so clip_value_min may exceed clip_value_max,
        # which also exercises that corner case of the operation
        inputs_data['clip_value_min'] = np.random.randint(-10, 10, clip_value_min_shape).astype(np.float32)
        inputs_data['clip_value_max'] = np.random.randint(-10, 10, clip_value_max_shape).astype(np.float32)
        return inputs_data

    def create_clip_by_value_net(self, t_shape):
        tf.compat.v1.reset_default_graph()
        # Create the graph and model
        with tf.compat.v1.Session() as sess:
            t = tf.compat.v1.placeholder(tf.float32, t_shape, 't')
            clip_value_min = tf.compat.v1.placeholder(tf.float32, [], 'clip_value_min')
            clip_value_max = tf.compat.v1.placeholder(tf.float32, [], 'clip_value_max')
            tf.raw_ops.ClipByValue(t=t, clip_value_min=clip_value_min, clip_value_max=clip_value_max)
            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def

        return tf_net, None

    test_data_basic = [
        dict(t_shape=[]),
        dict(t_shape=[2, 3]),
        dict(t_shape=[4, 1, 3]),
    ]

    @pytest.mark.parametrize("params", test_data_basic)
    @pytest.mark.precommit_tf_fe
    def test_clip_by_value_basic(self, params, ie_device, precision, ir_version, temp_dir,
                                 use_new_frontend, use_old_api):
        self._test(*self.create_clip_by_value_net(**params),
                   ie_device, precision, temp_dir=temp_dir, ir_version=ir_version,
                   use_new_frontend=use_new_frontend, use_old_api=use_old_api, **params)