[TF FE] Workaround for Broadcast/Concat issue with empty tensors (#18140)

* Added a transformation pass for Concat (RemoveConcatZeroDimInput)
* Added a test
* CI fix
* Fixed behavior of the "empty tensor list" test
This commit is contained in:
committed by GitHub
parent: 691630b68c
commit: fd48b0bbdc
@@ -23,6 +23,7 @@
 #include "openvino/util/log.hpp"
 #include "so_extension.hpp"
 #include "tf_framework_node.hpp"
+#include "transformations/common_optimizations/remove_concat_zero_dim_input.hpp"
 #include "transformations/common_optimizations/reverse_shape_and_type_infer.hpp"
 #include "transformations/transpose_sinking/ts_general.hpp"
 #include "translate_session.hpp"
@@ -341,6 +342,7 @@ void FrontEnd::normalize(const std::shared_ptr<ov::Model>& model) const {
     manager.register_pass<pass::BlockLSTMReplacer>();
     manager.register_pass<pass::GRUBlockCellReplacer>();
     manager.register_pass<pass::ConstToResultRemover>();
+    manager.register_pass<ov::pass::RemoveConcatZeroDimInput>();
     manager.register_pass<ov::pass::TransposeSinkingGeneral>();
     manager.register_pass<ov::pass::ReverseShapeAndTypeInfer>();
     manager.run_passes(model);
@@ -114,3 +114,13 @@ TEST_F(TransformationTestsF, SavedModelMultipleTensorNames) {
         model_ref = make_shared<Model>(OutputVector{result}, ParameterVector{x});
     }
 }
TEST_F(TransformationTestsF, SavedModelBroadcastIssue) {
|
||||
{ model = convert_model("saved_model_broadcast_issue"); }
|
||||
{
|
||||
// create a reference graph
|
||||
auto x = make_shared<Constant>(element::i64, Shape{2, 2}, vector<int64_t>{1, 2, -1, -1});
|
||||
|
||||
model_ref = make_shared<Model>(OutputVector{x}, ParameterVector{});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -383,8 +383,7 @@ TEST_F(TransformationTestsF, ModelWithEmptyTensorListAndPushBack) {
         auto x_flatten = make_shared<Reshape>(x, minus_one_const, false);
         auto zero_const = make_shared<Constant>(i32, Shape{1}, 0);
         auto x_unsqueeze_flatten = make_shared<Unsqueeze>(x_flatten, zero_const);
-        auto empty_const = make_shared<Constant>(f32, Shape{0, 30}, vector<float>{});
-        auto list_push_back = make_shared<Concat>(OutputVector{empty_const, x_unsqueeze_flatten}, 0);
+        auto list_push_back = make_shared<Concat>(OutputVector{x_unsqueeze_flatten}, 0);
        auto list_push_back_shape = make_shared<ShapeOf>(list_push_back, element::i32);
        auto start = make_shared<Constant>(i32, Shape{1}, 0);
        auto stop = make_shared<Constant>(i32, Shape{1}, 1);
@@ -0,0 +1,18 @@
|
||||
# Copyright (C) 2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
|
||||
# Create the graph and model
|
||||
tf.compat.v1.reset_default_graph()
|
||||
with tf.compat.v1.Session() as sess:
|
||||
tf_x = tf.constant(np.array([[1, 2]], dtype=np.dtype('i8')))
|
||||
tf_cast = tf.raw_ops.Cast(x=-1, DstT=tf.int64)
|
||||
tf_fill = tf.fill([0, 2], tf_cast, name="FillOperation")
|
||||
tf_fill2 = tf.fill([1, 2], tf_cast, name="FillOperation2")
|
||||
tf_output = tf.raw_ops.ConcatV2(values=[tf_x, tf_fill, tf_fill2], axis=0, name="ConcatOperation")
|
||||
tf.compat.v1.saved_model.simple_save(sess, os.path.join(sys.argv[1], "saved_model_broadcast_issue"), inputs={'x':tf_x}, outputs={'output':tf_output})
|
||||
Reference in New Issue
Block a user