[MO][TF FE] Do not print TF FE message in case of fallback (#16384)

* [MO][TF FE] Do not print TF FE message in case of fallback

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Correct test model with Switch and Merge

---------

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
This commit was authored by Roman Kazantsev on 2023-03-20 14:06:45 +04:00 and committed by GitHub.
parent f39684a7f8
commit 997414c64d
5 changed files with 176 additions and 3 deletions

View File

@@ -444,6 +444,8 @@ def prepare_ir(argv: argparse.Namespace):
f"The detailed reason why fallback was executed: not supported {reasons_message} were used. "
"You can specify --use_new_frontend flag to force using the Frontend MO path to avoid additional checks. " +
refer_to_faq_msg(105))
assert not hasattr(argv, 'is_fallback'), '`is_fallback` argument must not exist.'
argv.is_fallback = True
t.send_event("mo", "conversion_method", "mo_legacy")
graph = unified_pipeline(argv)

View File

@@ -47,7 +47,8 @@ def main(cli_parser: argparse.ArgumentParser, framework=None):
print(ov_update_message)
if ov_api20_message is not None and ngraph_function is not None:
print(ov_api20_message)
if not argv.use_legacy_frontend and is_tf:
is_fallback = getattr(argv, 'is_fallback', False)
if not argv.use_legacy_frontend and is_tf and not is_fallback:
# now TF FE is default frontend for TensorFlow models conversion
print(get_tf_fe_message())

View File

@@ -17,7 +17,8 @@ def arg_parse_helper(input_model,
use_new_frontend,
input_model_is_text,
framework,
compress_to_fp16=False):
compress_to_fp16=False,
freeze_placeholder_with_value=None):
path = os.path.dirname(__file__)
input_model = os.path.join(path, "test_models", input_model)
@@ -46,7 +47,7 @@ def arg_parse_helper(input_model,
layout={},
source_layout={},
target_layout={},
freeze_placeholder_with_value=None,
freeze_placeholder_with_value=freeze_placeholder_with_value,
data_type=None,
tensorflow_custom_operations_config_update=None,
compress_to_fp16=compress_to_fp16,
@@ -67,6 +68,21 @@ class TestInfoMessagesTFFE(unittest.TestCase):
assert tf_fe_message_found
class TestInfoMessagesTFFEWithFallback(unittest.TestCase):
    """Check that the TF FE info banner is NOT printed when conversion of a
    Switch/Merge model falls back to the legacy TensorFlow frontend."""

    @patch('argparse.ArgumentParser.parse_args',
           return_value=arg_parse_helper(input_model="model_switch_merge.pbtxt",
                                         use_legacy_frontend=False, use_new_frontend=False,
                                         framework=None, input_model_is_text=True,
                                         freeze_placeholder_with_value="is_training->False"))
    def test_tf_fe_message_fallback(self, mock_argparse):
        # Capture everything the conversion entry point prints to stdout.
        captured = io.StringIO()
        with redirect_stdout(captured):
            main(argparse.ArgumentParser())
        printed = captured.getvalue()
        # On the fallback path the new-frontend banner must be absent.
        assert get_tf_fe_message() not in printed, \
            'TF FE Info message is found for the fallback case'
class TestInfoMessagesCompressFP16(unittest.TestCase):
@patch('argparse.ArgumentParser.parse_args',
return_value=arg_parse_helper(input_model="model_int32.pbtxt",

View File

@@ -0,0 +1,134 @@
node {
name: "x"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: 2
}
dim {
size: 3
}
}
}
}
}
node {
name: "y"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: 2
}
dim {
size: 3
}
}
}
}
}
node {
name: "is_training"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_BOOL
}
}
attr {
key: "shape"
value {
shape {
}
}
}
}
node {
name: "Switch"
op: "Switch"
input: "x"
input: "is_training"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Relu"
op: "Relu"
input: "Switch"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Sigmoid"
op: "Sigmoid"
input: "Switch:1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Merge"
op: "Merge"
input: "Relu"
input: "Sigmoid"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "AddV2"
op: "AddV2"
input: "Merge"
input: "y"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "init"
op: "NoOp"
}
versions {
producer: 808
}

View File

@@ -0,0 +1,20 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

"""Generate model_switch_merge.pbtxt: a TF1 graph containing a Switch/Merge
conditional, used to exercise the legacy-frontend fallback path in MO tests."""

import tensorflow.compat.v1 as tf

tf.reset_default_graph()
with tf.Session() as sess:
    # Data inputs plus the boolean predicate that drives the Switch node.
    x = tf.placeholder(tf.float32, [2, 3], 'x')
    y = tf.placeholder(tf.float32, [2, 3], 'y')
    is_training = tf.placeholder(tf.bool, [], 'is_training')

    # Switch routes `x` to one of two branches based on `is_training`;
    # Merge forwards whichever branch actually fired.
    switch = tf.raw_ops.Switch(data=x, pred=is_training)
    relu = tf.raw_ops.Relu(features=switch[0])
    sigmoid = tf.raw_ops.Sigmoid(x=switch[1])
    merge = tf.raw_ops.Merge(inputs=[relu, sigmoid])
    tf.raw_ops.AddV2(x=merge[0], y=y)

    tf.global_variables_initializer()
    graph_def = sess.graph_def

# Serialize the graph as human-readable pbtxt next to the script.
tf.io.write_graph(graph_def, './', 'model_switch_merge.pbtxt', True)