From 24ddf1b274a830e87797ea50ad7843df8ecf6029 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Fri, 18 Aug 2023 13:39:59 +0400 Subject: [PATCH] [TF FE] Use regular Convolution in case dynamic input channels (#19253) * [TF FE] Use regular Convolution in case dynamic input channels This solution is aligned with the legacy frontend but it has limitation. This is a temporal solution until the core obtains ShapeOf evaluator. Signed-off-by: Kazantsev, Roman * Remove unused variable from the test Signed-off-by: Kazantsev, Roman * Fix unit-test * Update mo unit-test --------- Signed-off-by: Kazantsev, Roman --- .../tests/convert_tricky_models.cpp | 26 +++++++++++++++++++ ...enerate_conv_with_dynamic_input_channel.py | 19 ++++++++++++++ src/frontends/tensorflow_common/src/utils.cpp | 12 +++++---- .../moc_tf_fe/conversion_with_layout_test.py | 2 +- 4 files changed, 53 insertions(+), 6 deletions(-) create mode 100644 src/frontends/tensorflow/tests/test_models/gen_scripts/generate_conv_with_dynamic_input_channel.py diff --git a/src/frontends/tensorflow/tests/convert_tricky_models.cpp b/src/frontends/tensorflow/tests/convert_tricky_models.cpp index ec493a6876b..f7ef57511ea 100644 --- a/src/frontends/tensorflow/tests/convert_tricky_models.cpp +++ b/src/frontends/tensorflow/tests/convert_tricky_models.cpp @@ -749,3 +749,29 @@ TEST_F(FrontEndConversionWithReferenceTestsF, TF1IfWithNonExistentOpInBranch) { model_ref = make_shared(OutputVector{mul}, ParameterVector{y, ind}); } } + +TEST_F(FrontEndConversionWithReferenceTestsF, ConvolutionWithDynamicInputChannel) { + // This test aims to check conversion of a model with convolution of dynamic input channel + // Namely, the resulted model must contain the regular convolution, not grouped convolution + { model = convert_model("conv_with_dynamic_input_channel"); } + { + auto input = make_shared(f32, PartialShape{Dimension::dynamic(), 10, 10, Dimension::dynamic()}); + + auto transpose_order = make_shared(i64, Shape{4}, vector{0, 3, 
1, 2}); + auto transpose = make_shared(input, transpose_order); + + auto filter = make_shared(element::f32, Shape{6, 6, 3, 3}, vector(6 * 6 * 3 * 3, 0.0f)); + auto conv = make_shared(transpose, + filter, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}, + op::PadType::SAME_UPPER); + + auto transpose_order_back = make_shared(i64, Shape{4}, vector{0, 2, 3, 1}); + auto transpose_back = make_shared(conv, transpose_order_back); + + model_ref = make_shared(OutputVector{transpose_back}, ParameterVector{input}); + } +} diff --git a/src/frontends/tensorflow/tests/test_models/gen_scripts/generate_conv_with_dynamic_input_channel.py b/src/frontends/tensorflow/tests/test_models/gen_scripts/generate_conv_with_dynamic_input_channel.py new file mode 100644 index 00000000000..f6852a33f17 --- /dev/null +++ b/src/frontends/tensorflow/tests/test_models/gen_scripts/generate_conv_with_dynamic_input_channel.py @@ -0,0 +1,19 @@ +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys + +import tensorflow as tf + +# Create the graph and model +tf.compat.v1.reset_default_graph() +with tf.compat.v1.Session() as sess: + filter = tf.constant(value=0, shape=[3, 3, 6, 6], dtype=tf.float32) + input = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 10, 10, None], name='input') + conv = tf.raw_ops.Conv2D(input=input, + filter=filter, + strides=[1, 1, 1, 1], + padding='SAME') + tf.compat.v1.saved_model.simple_save(sess, os.path.join(sys.argv[1], "conv_with_dynamic_input_channel"), + inputs={'input': input}, outputs={'conv': conv}) diff --git a/src/frontends/tensorflow_common/src/utils.cpp b/src/frontends/tensorflow_common/src/utils.cpp index 4d727cc1d77..5e65bb7dae2 100644 --- a/src/frontends/tensorflow_common/src/utils.cpp +++ b/src/frontends/tensorflow_common/src/utils.cpp @@ -208,11 +208,7 @@ OutputVector translate_convolution_op(const frontend::NodeContext& node, size_t } Output conv; - if 
(input_channels_static && num_groups == 1) { - // regular convolutional operation - // we assume that input channel size will not be changed if they are already static - conv = make_shared(input, filter, strides, pads_begin, pads_end, dilations, auto_pad); - } else { + if (input_channels_static && num_groups > 1) { // grouped convolutional operation // compute input channels given from the input and the filter // and number of groups required to split the filter @@ -233,6 +229,12 @@ auto filter_new_shape = make_shared(OutputVector{num_groups, filter_new_cout, shape_cin_xy}, 0); auto new_filter = make_shared(filter, filter_new_shape, false); conv = make_shared(input, new_filter, strides, pads_begin, pads_end, dilations, auto_pad); + } else { + // assumption to use regular convolution for all other cases is taken from the legacy frontend + // this solution is sufficient for all observed models in the validation + // in general, it has limitation and it needs to use grouped convolution when num_groups is not static + // 118107: remove this assumption when it obtains complete shape propagation in the core + conv = make_shared(input, filter, strides, pads_begin, pads_end, dilations, auto_pad); } convert_nchw_to_nhwc(is_nhwc, conv, Rank(spatial_dims_num + 2)); diff --git a/tools/mo/unit_tests/moc_tf_fe/conversion_with_layout_test.py b/tools/mo/unit_tests/moc_tf_fe/conversion_with_layout_test.py index 0a3264e7fcf..9fc4f0cb1e6 100644 --- a/tools/mo/unit_tests/moc_tf_fe/conversion_with_layout_test.py +++ b/tools/mo/unit_tests/moc_tf_fe/conversion_with_layout_test.py @@ -68,7 +68,7 @@ class TestConversionWithBatchAndLayout(unittest.TestCase): *[ ( "model_with_convolution_dynamic_rank.pbtxt", 7, "x(n???),kernel(????)", - {"x": PartialShape([7, Dimension.dynamic(), Dimension.dynamic(), Dimension.dynamic()]), + {"x": PartialShape([7, Dimension.dynamic(), Dimension.dynamic(), 3]), "kernel": 
PartialShape([2, 2, 3, 1])}, ), (