[TF FE] Use regular Convolution in case of dynamic input channels (#19253)

* [TF FE] Use regular Convolution in case of dynamic input channels

This solution is aligned with the legacy frontend, but it has limitations.
It is a temporary solution until the core obtains a ShapeOf evaluator.

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Remove unused variable from the test

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix unit test

* Update MO unit test

---------

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
Roman Kazantsev 2023-08-18 13:39:59 +04:00 committed by GitHub
parent ef33c2b3fd
commit 24ddf1b274
4 changed files with 53 additions and 6 deletions


@@ -749,3 +749,29 @@ TEST_F(FrontEndConversionWithReferenceTestsF, TF1IfWithNonExistentOpInBranch) {
        model_ref = make_shared<Model>(OutputVector{mul}, ParameterVector{y, ind});
    }
}
TEST_F(FrontEndConversionWithReferenceTestsF, ConvolutionWithDynamicInputChannel) {
    // This test checks conversion of a model with a convolution on a dynamic input channel.
    // Namely, the resulting model must contain a regular Convolution, not a GroupConvolution.
    { model = convert_model("conv_with_dynamic_input_channel"); }
    {
        auto input = make_shared<Parameter>(f32, PartialShape{Dimension::dynamic(), 10, 10, Dimension::dynamic()});
        auto transpose_order = make_shared<Constant>(i64, Shape{4}, vector<int64_t>{0, 3, 1, 2});
        auto transpose = make_shared<Transpose>(input, transpose_order);
        auto filter = make_shared<Constant>(element::f32, Shape{6, 6, 3, 3}, vector<float>(6 * 6 * 3 * 3, 0.0f));
        auto conv = make_shared<Convolution>(transpose,
                                             filter,
                                             Strides{1, 1},
                                             CoordinateDiff{0, 0},
                                             CoordinateDiff{0, 0},
                                             Strides{1, 1},
                                             op::PadType::SAME_UPPER);
        auto transpose_order_back = make_shared<Constant>(i64, Shape{4}, vector<int64_t>{0, 2, 3, 1});
        auto transpose_back = make_shared<Transpose>(conv, transpose_order_back);
        model_ref = make_shared<Model>(OutputVector{transpose_back}, ParameterVector{input});
    }
}
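
For illustration, the same expectation can be checked from Python. This is a minimal sketch, assuming the openvino package with convert_model available and the SavedModel directory produced by the generation script below:

import openvino as ov

# Convert the SavedModel whose input channel dimension is dynamic.
ov_model = ov.convert_model("conv_with_dynamic_input_channel")

# The converted graph is expected to contain a regular Convolution,
# not a GroupConvolution.
op_types = {op.get_type_name() for op in ov_model.get_ops()}
assert "Convolution" in op_types
assert "GroupConvolution" not in op_types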


@@ -0,0 +1,19 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import sys

import tensorflow as tf

# Create the graph and model
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
    filter = tf.constant(value=0, shape=[3, 3, 6, 6], dtype=tf.float32)
    input = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 10, 10, None], name='input')
    conv = tf.raw_ops.Conv2D(input=input,
                             filter=filter,
                             strides=[1, 1, 1, 1],
                             padding='SAME')
    tf.compat.v1.saved_model.simple_save(sess, os.path.join(sys.argv[1], "conv_with_dynamic_input_channel"),
                                         inputs={'input': input}, outputs={'conv': conv})
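
Note on usage: the script takes the destination directory as its first command-line argument (sys.argv[1]), so the SavedModel is written to <output_dir>/conv_with_dynamic_input_channel, the model name that the new C++ test above loads via convert_model.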


@@ -208,11 +208,7 @@ OutputVector translate_convolution_op(const frontend::NodeContext& node, size_t
     }
     Output<Node> conv;
-    if (input_channels_static && num_groups == 1) {
-        // regular convolutional operation
-        // we assume that input channel size will not be changed if they are already static
-        conv = make_shared<Convolution>(input, filter, strides, pads_begin, pads_end, dilations, auto_pad);
-    } else {
+    if (input_channels_static && num_groups > 1) {
         // grouped convolutional operation
         // compute input channels given from the input and the filter
         // and number of groups required to split the filter
@@ -233,6 +229,12 @@
         auto filter_new_shape = make_shared<Concat>(OutputVector{num_groups, filter_new_cout, shape_cin_xy}, 0);
         auto new_filter = make_shared<Reshape>(filter, filter_new_shape, false);
         conv = make_shared<GroupConvolution>(input, new_filter, strides, pads_begin, pads_end, dilations, auto_pad);
+    } else {
+        // the assumption to use a regular convolution for all other cases is taken from the legacy frontend
+        // this solution is sufficient for all models observed during validation
+        // in general, it has limitations: a grouped convolution is needed when num_groups is not static
+        // 118107: remove this assumption once the core obtains complete shape propagation
+        conv = make_shared<Convolution>(input, filter, strides, pads_begin, pads_end, dilations, auto_pad);
     }
     convert_nchw_to_nhwc(is_nhwc, conv, Rank(spatial_dims_num + 2));
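
To make the branch structure above easier to follow, here is a minimal Python sketch of the decision the translator now makes; the function name and the num_groups computation are illustrative of the C++ logic, not actual identifiers from the frontend:

def pick_convolution(input_channels, filter_in_channels):
    # input_channels is None when the channel dimension is dynamic
    if input_channels is not None:
        # number of groups the filter has to be split into
        num_groups = input_channels // filter_in_channels
        if num_groups > 1:
            # static channels, more than one group: grouped convolution
            return "GroupConvolution"
    # all other cases (num_groups == 1 or dynamic channels): regular convolution,
    # following the legacy-frontend assumption described in the comments above
    return "Convolution"

assert pick_convolution(None, 6) == "Convolution"    # dynamic input channels
assert pick_convolution(6, 6) == "Convolution"       # num_groups == 1
assert pick_convolution(6, 1) == "GroupConvolution"  # e.g. a depthwise-style case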


@@ -68,7 +68,7 @@ class TestConversionWithBatchAndLayout(unittest.TestCase):
         *[
             (
                 "model_with_convolution_dynamic_rank.pbtxt", 7, "x(n???),kernel(????)",
-                {"x": PartialShape([7, Dimension.dynamic(), Dimension.dynamic(), Dimension.dynamic()]),
+                {"x": PartialShape([7, Dimension.dynamic(), Dimension.dynamic(), 3]),
                  "kernel": PartialShape([2, 2, 3, 1])},
             ),
             (