[TF FE] Handle intermediate outputs in SavedModel (#18970)

Roman Kazantsev 2023-08-03 23:36:00 +04:00 committed by GitHub
parent 561f71d86c
commit 625d50c8c8
4 changed files with 69 additions and 17 deletions

@@ -237,20 +237,38 @@ void InputModel::InputModelTFImpl::load_places() {
m_telemetry->send_event("op_count", "tf_" + op.first, static_cast<int>(op.second));
}
}
m_graph_iterator->reset();
m_outputs.clear();
// SavedModel and MetaGraph formats have a model signature that provides a concrete list of outputs
// some outputs can be placed among intermediate layers (i.e. they can have their own output consumers)
// so terminal nodes alone may not cover the whole list of outputs
if (m_saved_model_output_names) {
for (const auto& map_name : *m_saved_model_output_names) {
const auto& output_internal_tensor_name = map_name.first;
auto output_place = std::make_shared<TensorPlace>(m_input_model,
ov::PartialShape({}),
ov::element::dynamic,
std::vector<std::string>{output_internal_tensor_name});
m_tensor_places[output_internal_tensor_name] = output_place;
m_outputs.push_back(output_place);
}
return;
}
// treat terminal nodes as the model's outputs for the frozen TF1 format
std::set<std::string> op_names_without_consumers;
std::set_difference(all_op_names.begin(),
all_op_names.end(),
op_names_with_consumers.begin(),
op_names_with_consumers.end(),
std::inserter(op_names_without_consumers, op_names_without_consumers.begin()));
m_graph_iterator->reset();
m_outputs.clear();
for (auto& output_name : op_names_without_consumers) {
std::vector<std::string> output_names = {output_name};
auto output_place =
std::make_shared<TensorPlace>(m_input_model, ov::PartialShape({}), ov::element::dynamic, output_names);
for (const auto& output_name : op_names_without_consumers) {
auto output_place = std::make_shared<TensorPlace>(m_input_model,
ov::PartialShape({}),
ov::element::dynamic,
std::vector<std::string>{output_name});
m_tensor_places[output_name] = output_place;
m_outputs.push_back(output_place);
}
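
With this change, a signature output that also feeds other layers is registered as a model output instead of being dropped. A minimal usage sketch, assuming OpenVINO's Python convert_model API and the saved_model_intermediate_output directory produced by the generation script shown later in this commit (the path is an assumption):

# Convert the SavedModel and check that both signature outputs survive,
# including 'output1', which is an intermediate Add node consumed by 'output2'.
import openvino as ov

model = ov.convert_model("saved_model_intermediate_output")  # assumed path to the SavedModel directory
assert len(model.outputs) == 2  # 'output1' and 'output2' are both preserved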


@@ -24,8 +24,8 @@ void copy_conditional_flow_marker_with_branches(const CfMarkerType& copy_from,
unordered_map<uint32_t, SetOfBranchIndices>& copy_to_braches) {
for (const auto& marker : copy_from.existing_markers_with_branches) {
const auto& switch_marker = marker.first;
const auto& branch_markers = marker.second;
copy_to_braches[switch_marker].insert(branch_markers.begin(), branch_markers.end());
const auto& branch_indices = marker.second;
copy_to_braches[switch_marker].insert(branch_indices.begin(), branch_indices.end());
}
}
@@ -33,8 +33,8 @@ void copy_conditional_flow_marker_with_switches(const CfMarkerType& copy_from,
unordered_map<uint32_t, SetOfSwitchNodes>& copy_to_switches) {
for (const auto& marker : copy_from.existing_markers_with_switches) {
const auto& switch_marker = marker.first;
const auto& branch_markers = marker.second;
copy_to_switches[switch_marker].insert(branch_markers.begin(), branch_markers.end());
const auto& switch_nodes = marker.second;
copy_to_switches[switch_marker].insert(switch_nodes.begin(), switch_nodes.end());
}
}
@@ -334,14 +334,11 @@ bool propagate_conditional_flow(const OutputVector& ov_inputs,
resulted_cf_marker.existing_markers_with_branches = combined_markers_with_branches;
resulted_cf_marker.existing_markers_with_switches = combined_markers_with_switches;
} else {
// check that non-Merge node does not expect data/flow from different Switch branches
// it means inconsistent model
// non-Merge nodes can contain both branch markers of the same conditional flow
// this can happen if some conditional edge goes directly from a Switch node to this non-Merge node
// it means that the output value is external to the If represented with Switch-Merge nodes
// and must be computed before the If
for (const auto& marker : combined_markers_with_branches) {
auto branch_markers = marker.second;
FRONT_END_GENERAL_CHECK(
branch_markers.size() < 2,
"[TensorFlow Frontend] inconsistent input model: non-Merge node expects data or "
"flow from different branches of one Switch node");
resulted_cf_marker.existing_markers_with_branches.insert(marker);
}
resulted_cf_marker.existing_markers_with_switches.insert(combined_markers_with_switches.begin(),


@@ -138,3 +138,22 @@ TEST_F(FrontEndConversionWithReferenceTestsF, SavedModelMultiGraph) {
model_ref = make_shared<Model>(OutputVector{add}, ParameterVector{y});
}
}
TEST_F(FrontEndConversionWithReferenceTestsF, SavedModelWithIntermediateOutput) {
// The test aims to check that outputs from intermediate layers that are present in the model signature
// are preserved
{
model = convert_model("saved_model_intermediate_output");
ASSERT_TRUE(model->get_results().size() == 2);
}
{
// create a reference graph
auto input1 = make_shared<Parameter>(element::f32, Shape{2});
auto input2 = make_shared<Parameter>(element::f32, Shape{2});
auto add = make_shared<Add>(input1, input2);
auto sub = make_shared<Subtract>(add, input2);
auto result1 = make_shared<Result>(add);
auto result2 = make_shared<Result>(sub);
model_ref = make_shared<Model>(OutputVector{result1, result2}, ParameterVector{input1, input2});
}
}


@@ -0,0 +1,18 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import tensorflow as tf
# Create the graph and model
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
input1 = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2], name='input1')
input2 = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2], name='input2')
output1 = tf.add(input1, input2, name="output1")
output2 = tf.subtract(output1, input2, name="output2")
tf.compat.v1.saved_model.simple_save(sess, os.path.join(sys.argv[1], "saved_model_intermediate_output"),
inputs={'input1': input1, 'input2': input2},
outputs={'output1': output1, 'output2': output2})
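
The script expects the destination directory as its first command-line argument and writes the model under <destination_dir>/saved_model_intermediate_output. A hedged usage sketch (the script path below is a placeholder):

python path/to/this_script.py /tmp/tf_models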