[TF FE] Support TF1 While Control flow (#20105)

* [TF FE] Support TF1 While Control flow

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Apply code-style fix

* Update API for OpPlace to store back edge

* Fix build: do not pass an rvalue by reference

* Fix build issue: correct type

* Fix TF FE unit-tests

* Apply code-review feedback: remove unused vars

* Fix fusing of a complicated TF1 While case

* Remove unused variable

* Update MO unit test

* Fix layer tests for While

* Handle Switch and NextIteration nodes connected directly

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

---------

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
Roman Kazantsev 2023-10-02 09:56:10 +04:00 committed by GitHub
parent e2501a67d2
commit b409ea1930
23 changed files with 694 additions and 168 deletions
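
With this change, a frozen TF1 graph built from raw Enter/Merge/Switch/LoopCond/NextIteration/Exit nodes is fused into a single ov::op::v5::Loop during conversion. A minimal C++ sketch of the conversion entry point, mirroring the unit test removed further below (the model path is a placeholder):

    #include <iostream>
    #include "openvino/frontend/manager.hpp"

    int main() {
        ov::frontend::FrontEndManager manager;
        auto front_end = manager.load_by_framework("tf");  // TensorFlow frontend
        // placeholder path to a frozen TF1 graph with raw control-flow nodes
        auto input_model = front_end->load("model_tf1_while.pbtxt");
        // before this commit, convert() threw OpConversionFailure for
        // Enter, Exit, LoopCond, Merge, NextIteration, Switch
        auto model = front_end->convert(input_model);
        std::cout << "converted: " << model->get_ops().size() << " operations\n";
        return 0;
    }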

View File

@ -137,6 +137,10 @@ void InputModel::InputModelTFImpl::load_places() {
auto op_name = node_decoder->get_op_name();
auto op_type = node_decoder->get_op_type();
if (op_type == "Placeholder" && op_name.rfind("unused_control_flow_input", 0) != std::string::npos) {
continue;
}
if (m_telemetry) {
op_statistics[op_type]++;
}
@ -320,9 +324,6 @@ std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologicall
std::stack<std::shared_ptr<OpPlace>> ops_to_do;
std::unordered_set<std::shared_ptr<OpPlace>> ops_done;
// TODO: implement logic to check direct cycles in the graph
// and break them
// probably not only NextIteration can generate cycles
for (const auto& output_place : m_outputs) {
FRONT_END_GENERAL_CHECK(output_place->get_names().size() > 0, "TensorPlace must have at least one name.");
auto output_place_name = output_place->get_names()[0];
@ -336,6 +337,23 @@ std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologicall
ops_to_do.push(output_operation_place);
}
// walk through all NextIteration nodes and put their producers into ops_to_do
// this is needed so that nodes in the body graph of a TF1 While operation are not missed
for (const auto& op_place : m_op_places) {
auto op_decoder = op_place->get_decoder();
if (op_decoder->get_op_type() == "NextIteration") {
std::string producer_name;
std::string producer_output_port_name;
size_t producer_output_port_idx;
op_decoder->get_input_node(0, producer_name, producer_output_port_name, producer_output_port_idx);
FRONT_END_GENERAL_CHECK(m_op_places_map.count(producer_name),
"[TensorFlow Frontend] internal error or inconsistent model: producer of "
"NextIteration is not found among operation places " +
producer_name);
ops_to_do.push(m_op_places_map.at(producer_name));
}
}
// the traversing algorithm to compute topologically sorted nodes is taken from topological_sort in
// core/graph_util.hpp
while (ops_to_do.size() > 0) {
@ -350,6 +368,14 @@ std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologicall
if (current_operation_type == "NextIteration") {
// break the cycle created by NextIteration
input_count = 0;
std::string producer_name;
std::string producer_output_port_name;
size_t producer_output_port_idx;
current_operation_decoder->get_input_node(0,
producer_name,
producer_output_port_name,
producer_output_port_idx);
current_operation_place->set_next_iteration_back_edge(producer_name, producer_output_port_idx);
}
for (size_t input_port_idx = 0; input_port_idx < input_count; ++input_port_idx) {
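
The hunk above breaks the only cycle a TF1 loop introduces by treating NextIteration as if it had no inputs, while recording the skipped producer as a back edge on the OpPlace. A self-contained toy sketch of the same traversal idea (illustration only, not the frontend's code; node names are made up):

    #include <iostream>
    #include <map>
    #include <stack>
    #include <string>
    #include <vector>

    struct ToyNode {
        std::string type;
        std::vector<std::string> inputs;  // names of producer nodes
    };

    int main() {
        // while-loop skeleton: Merge <- {Enter, NextIteration}, NextIteration <- Add <- Merge
        std::map<std::string, ToyNode> graph = {
            {"Enter", {"Enter", {}}},
            {"Merge", {"Merge", {"Enter", "NextIteration"}}},
            {"Add", {"Add", {"Merge"}}},
            {"NextIteration", {"NextIteration", {"Add"}}},
        };
        std::map<std::string, std::string> back_edges;  // NextIteration -> skipped producer
        std::map<std::string, bool> done;
        std::vector<std::string> order;
        std::stack<std::string> to_do;
        to_do.push("Merge");  // model output
        to_do.push("Add");    // seed producers of NextIteration so the body is not missed
        while (!to_do.empty()) {
            auto name = to_do.top();
            const auto& node = graph[name];
            bool ready = true;
            if (node.type == "NextIteration") {
                // break the cycle: ignore inputs, remember the back edge
                back_edges[name] = node.inputs.at(0);
            } else {
                for (const auto& producer : node.inputs) {
                    if (!done[producer]) {
                        to_do.push(producer);
                        ready = false;
                    }
                }
            }
            if (ready) {
                to_do.pop();
                if (!done[name]) {
                    done[name] = true;
                    order.push_back(name);
                }
            }
        }
        for (const auto& name : order)
            std::cout << name << '\n';  // NextIteration Enter Merge Add
    }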

View File

@ -0,0 +1,34 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "helper_ops/enter.hpp"
#include "common_op_table.hpp"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "utils.hpp"
using namespace std;
using namespace ov;
using namespace ov::frontend::tensorflow;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
OutputVector translate_enter_op(const NodeContext& node) {
default_op_checks(node, 1, {"Enter"});
auto data = node.get_input(0);
auto frame_name = node.get_attribute<string>("frame_name");
auto enter_node = make_shared<Enter>(data, frame_name, node.get_decoder());
set_node_name(node.get_name(), enter_node);
return enter_node->outputs();
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "helper_ops/exit.hpp"
#include "common_op_table.hpp"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "utils.hpp"
using namespace std;
using namespace ov;
using namespace ov::frontend::tensorflow;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
OutputVector translate_exit_op(const NodeContext& node) {
default_op_checks(node, 1, {"Exit"});
auto data = node.get_input(0);
auto exit_node = make_shared<Exit>(data, node.get_decoder());
set_node_name(node.get_name(), exit_node);
return exit_node->outputs();
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,34 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "helper_ops/loop_cond.hpp"
#include "common_op_table.hpp"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "utils.hpp"
using namespace std;
using namespace ov;
using namespace ov::op;
using namespace ov::frontend::tensorflow;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
OutputVector translate_loop_cond_op(const NodeContext& node) {
default_op_checks(node, 1, {"LoopCond"});
auto input = node.get_input(0);
auto loop_cond_node = make_shared<LoopCond>(input, node.get_decoder());
set_node_name(node.get_name(), loop_cond_node);
return loop_cond_node->outputs();
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "helper_ops/next_iteration.hpp"
#include "common_op_table.hpp"
#include "helper_ops/merge.hpp"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "utils.hpp"
using namespace std;
using namespace ov;
using namespace ov::frontend::tensorflow;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
OutputVector translate_next_iteration_op(const NodeContext& node) {
default_op_checks(node, 0, {"NextIteration"});
auto next_iteration_node = make_shared<NextIteration>(node.get_decoder());
set_node_name(node.get_name(), next_iteration_node);
return next_iteration_node->outputs();
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -4,7 +4,7 @@
#include "common_op_table.hpp"
#include "input_model.hpp"
#include "openvino/opsets/opset10.hpp"
#include "tf_utils.hpp"
using namespace std;
using namespace ov;
@ -18,7 +18,7 @@ OutputVector translate_partitioned_call_op(const NodeContext& node) {
auto node_name = node.get_name();
auto translate_session = node.get_translate_session();
FRONT_END_GENERAL_CHECK(translate_session, "[TensorFlow Frontend] Internal error: Translate session is nullptr.");
auto operation_type = node.get_attribute<std::string>("f");
auto operation_type = node.get_attribute<string>("f");
// prepare a vector of inputs
OutputVector ov_inputs;
@ -33,9 +33,8 @@ OutputVector translate_partitioned_call_op(const NodeContext& node) {
// of StatefulPartitionedCall; otherwise they would cause duplicates. But we need to keep them
// for "internal functions of Saved Model", which are named "__inference_signature_wrapper" or
// "__inference_wrapped_model".
auto body_model = translate_session->get_body_ov_model(operation_type,
ov_inputs,
operation_type.find("wrappe") == std::string::npos);
auto body_model =
translate_session->get_body_ov_model(operation_type, ov_inputs, operation_type.find("wrappe") == string::npos);
FRONT_END_OP_CONVERSION_CHECK(
body_model,
"[TensorFlow Frontend] Internal error or incorrect input model: body graph is not found for " + operation_type +
@ -43,11 +42,11 @@ OutputVector translate_partitioned_call_op(const NodeContext& node) {
// inject the body graph into the parent graph
OutputVector ov_outputs;
translate_session->inject_body_model(body_model, operation_type, ov_inputs, ov_outputs);
inject_body_model(body_model, operation_type, ov_inputs, ov_outputs);
// set output tensor names
for (size_t idx = 0; idx < ov_outputs.size(); ++idx) {
set_out_name({node_name + ":" + std::to_string(idx)}, ov_outputs[idx]);
set_out_name({node_name + ":" + to_string(idx)}, ov_outputs[idx]);
}
return ov_outputs;

View File

@ -4,11 +4,10 @@
#include "common_op_table.hpp"
#include "input_model.hpp"
#include "openvino/opsets/opset10.hpp"
#include "tf_utils.hpp"
using namespace std;
using namespace ov;
using namespace ov::opset10;
namespace ov {
namespace frontend {
@ -21,7 +20,7 @@ OutputVector translate_while_op(const NodeContext& node) {
auto input_size_t = node.get_input_size();
auto input_size = static_cast<int>(input_size_t);
ov::OutputVector ov_inputs;
OutputVector ov_inputs;
for (int input_ind = 0; input_ind < input_size; ++input_ind) {
ov_inputs.push_back(node.get_input(input_ind));
}
@ -30,8 +29,8 @@ OutputVector translate_while_op(const NodeContext& node) {
translate_session,
"[TensorFlow Frontend] Internal error: Translate session is nullptr.");
// retrieve condition and body graphs
auto cond_type = node.get_attribute<std::string>("cond");
auto body_type = node.get_attribute<std::string>("body");
auto cond_type = node.get_attribute<string>("cond");
auto body_type = node.get_attribute<string>("body");
auto cond_model = translate_session->get_body_ov_model(cond_type, ov_inputs);
TENSORFLOW_OP_VALIDATION(
node,
@ -43,82 +42,7 @@ OutputVector translate_while_op(const NodeContext& node) {
body_model,
"[TensorFlow Frontend] Internal error or incorrect input model. Cannot find body graph with name " + body_type);
// inject condition body graph prior to Loop node
// to check the condition before starting iterations
auto cond_params = cond_model->get_parameters();
// type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present
// also for more accurate execution_condition variable shape deducing we need shape inference for condition graph
for (int input_ind = 0; input_ind < input_size; ++input_ind) {
cond_params[input_ind]->set_element_type(node.get_input(input_ind).get_element_type());
cond_params[input_ind]->set_partial_shape(node.get_input(input_ind).get_partial_shape());
}
cond_model->validate_nodes_and_infer_types();
auto cond_prior = cond_model->clone();
ov::OutputVector ov_outputs;
translate_session->inject_body_model(cond_prior, node.get_name() + "/cond", ov_inputs, ov_outputs);
TENSORFLOW_OP_VALIDATION(
node,
ov_outputs.size() == 1,
"[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node.");
auto exec_cond = ov_outputs[0];
auto trip_count = make_shared<Constant>(element::i32, Shape{}, -1);
auto loop = make_shared<Loop>(trip_count, exec_cond);
// prepare body model to be set for the Loop node
// note that the condition should be computed on the updated input
// because this is a while(cond) { } construct,
// that is why the condition graph is stitched to the body results
auto body_params = body_model->get_parameters();
auto body_results = body_model->get_results();
auto cond_results = cond_model->get_results();
auto cond_params_size = cond_params.size();
TENSORFLOW_OP_VALIDATION(node,
body_params.size() == input_size_t,
"[TensorFlow Frontend] Internal error or inconsistent model: body graph "
"must have the same number of Parameter nodes as the number of inputs to While.");
TENSORFLOW_OP_VALIDATION(node,
body_results.size() == input_size_t,
"[TensorFlow Frontend] Internal error or inconsistent model: body graph "
"must have the same number of Result nodes as the number of inputs to While.");
TENSORFLOW_OP_VALIDATION(node,
cond_params.size() == input_size_t,
"[TensorFlow Frontend] Internal error or inconsistent model: condition graph "
"must have the same number of Parameter nodes as the number of inputs to While.");
for (size_t param_ind = 0; param_ind < cond_params_size; ++param_ind) {
cond_params[param_ind]->output(0).replace(body_results[param_ind]->input_value(0));
}
// update body model with the new result that corresponds to execution condition
TENSORFLOW_OP_VALIDATION(
node,
cond_results.size() == 1 && cond_results[0],
"[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node.");
auto body_condition_output_idx = static_cast<int64_t>(body_results.size());
body_model->add_results(cond_results);
// type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present
for (int input_ind = 0; input_ind < input_size; ++input_ind) {
body_params[input_ind]->set_element_type(node.get_input(input_ind).get_element_type());
}
// set data for the Loop node
loop->set_function(body_model);
for (int input_ind = 0; input_ind < input_size; ++input_ind) {
loop->set_merged_input(body_params[input_ind],
node.get_input(input_ind),
body_results[input_ind]->input_value(0));
}
loop->set_special_body_ports({-1, body_condition_output_idx});
// set external outputs for Loop node
// do not get execution condition outside of the Loop node
for (size_t output_ind = 0; output_ind < input_size_t; ++output_ind) {
loop->get_iter_value(body_results[output_ind]);
}
loop->validate_and_infer_types();
auto loop = create_loop_for_tf_while(node.get_name(), body_model, cond_model, ov_inputs);
set_node_name(node.get_name(), loop);
return loop->outputs();
}

View File

@ -22,14 +22,18 @@ namespace op {
TF_OP_CONVERTER(translate_assignvariable_op);
TF_OP_CONVERTER(translate_block_lstm_op);
TF_OP_CONVERTER(translate_enter_op);
TF_OP_CONVERTER(translate_exit_op);
TF_OP_CONVERTER(translate_fifo_queue_op);
TF_OP_CONVERTER(translate_gru_block_cell_op);
TF_OP_CONVERTER(translate_hash_table_op);
TF_OP_CONVERTER(translate_if_op);
TF_OP_CONVERTER(translate_iterator_get_next_op);
TF_OP_CONVERTER(translate_iterator_op);
TF_OP_CONVERTER(translate_loop_cond_op);
TF_OP_CONVERTER(translate_merge_op);
TF_OP_CONVERTER(translate_mergev2checkpoint_op);
TF_OP_CONVERTER(translate_next_iteration_op);
TF_OP_CONVERTER(translate_partitioned_call_op);
TF_OP_CONVERTER(translate_placeholder_linked_op);
TF_OP_CONVERTER(translate_queue_dequeue_op);
@ -310,6 +314,12 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
// XLA operations
{"XlaConvV2", CreatorFunction(translate_xla_conv_v2_op)},
{"XlaDotV2", CreatorFunction(translate_xla_dot_op)},
// TF1 Control Flow operations
{"Enter", CreatorFunction(translate_enter_op)},
{"Exit", CreatorFunction(translate_exit_op)},
{"LoopCond", CreatorFunction(translate_loop_cond_op)},
{"NextIteration", CreatorFunction(translate_next_iteration_op)},
};
};
} // namespace op

View File

@ -12,9 +12,12 @@
#include "helper_ops/switch.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/frontend/exception.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/tensor.hpp"
using namespace ov;
using namespace ov::op;
using namespace ov::element;
using namespace ov::frontend::tensorflow;
using namespace std;
@ -369,6 +372,112 @@ bool propagate_conditional_flow(const OutputVector& ov_inputs,
return to_propagate;
}
// create Loop operation corresponding to TensorFlow While operation
shared_ptr<v5::Loop> create_loop_for_tf_while(const std::string& while_node_name,
const shared_ptr<Model>& body_model,
const shared_ptr<Model>& cond_model,
const OutputVector& ov_inputs) {
size_t input_size = ov_inputs.size();
// inject condition body graph prior to Loop node
// to check the condition before starting iterations
auto cond_params = cond_model->get_parameters();
FRONT_END_GENERAL_CHECK(input_size == cond_params.size(),
"[TensorFlow Frontend] internal error: mismatch number of inputs to While and a number of "
"inputs in a conditional graph");
// type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present
// also for more accurate execution_condition variable shape deducing we need shape inference for condition graph
for (size_t input_ind = 0; input_ind < input_size; ++input_ind) {
cond_params[input_ind]->set_element_type(ov_inputs[input_ind].get_element_type());
cond_params[input_ind]->set_partial_shape(ov_inputs[input_ind].get_partial_shape());
}
cond_model->validate_nodes_and_infer_types();
auto cond_prior = cond_model->clone();
ov::OutputVector ov_outputs;
inject_body_model(cond_prior, while_node_name + "/cond", ov_inputs, ov_outputs);
FRONT_END_GENERAL_CHECK(
ov_outputs.size() == 1,
"[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node.");
auto exec_cond = ov_outputs[0];
auto trip_count = make_shared<v0::Constant>(element::i32, Shape{}, -1);
auto loop = make_shared<v5::Loop>(trip_count, exec_cond);
// prepare body model to be set for the Loop node
// note that the condition should be computed on the updated input
// because this is a while(cond) { } construct,
// that is why the condition graph is stitched to the body results
auto body_params = body_model->get_parameters();
auto body_results = body_model->get_results();
auto cond_results = cond_model->get_results();
FRONT_END_GENERAL_CHECK(body_params.size() == input_size,
"[TensorFlow Frontend] Internal error or inconsistent model: body graph "
"must have the same number of Parameter nodes as the number of inputs to While.");
FRONT_END_GENERAL_CHECK(cond_params.size() == input_size,
"[TensorFlow Frontend] Internal error or inconsistent model: condition graph "
"must have the same number of Parameter nodes as the number of inputs to While.");
for (size_t param_ind = 0; param_ind < body_results.size(); ++param_ind) {
cond_params[param_ind]->output(0).replace(body_results[param_ind]->input_value(0));
}
// update body model with the new result that corresponds to execution condition
FRONT_END_GENERAL_CHECK(
cond_results.size() == 1 && cond_results[0],
"[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node.");
auto body_condition_output_idx = static_cast<int64_t>(body_results.size());
body_model->add_results(cond_results);
// type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present
for (size_t input_ind = 0; input_ind < input_size; ++input_ind) {
body_params[input_ind]->set_element_type(ov_inputs[input_ind].get_element_type());
}
// set data for the Loop node
loop->set_function(body_model);
// body_results may contain fewer nodes than body_params, which means a back edge does not exist for every body parameter
for (size_t input_ind = 0; input_ind < static_cast<size_t>(body_condition_output_idx); ++input_ind) {
loop->set_merged_input(body_params[input_ind], ov_inputs[input_ind], body_results[input_ind]->input_value(0));
}
loop->set_special_body_ports({-1, body_condition_output_idx});
// set external outputs for Loop node
// do not get execution condition outside of the Loop node
for (size_t output_ind = 0; output_ind < static_cast<size_t>(body_condition_output_idx); ++output_ind) {
loop->get_iter_value(body_results[output_ind]);
}
loop->validate_and_infer_types();
return loop;
}
void inject_body_model(std::shared_ptr<ov::Model> ov_model_to_inject,
const std::string& operation_type,
const ov::OutputVector& ov_inputs,
ov::OutputVector& ov_outputs) {
ov_outputs.clear();
auto body_parameters = ov_model_to_inject->get_parameters();
FRONT_END_GENERAL_CHECK(body_parameters.size() == ov_inputs.size(),
"[TensorFlow Error] Internal error or incorrect input models: number of "
"inputs and arguments to the function " +
operation_type + " do not match.");
for (size_t param_ind = 0; param_ind < body_parameters.size(); ++param_ind) {
auto orig_type = body_parameters[param_ind]->get_element_type();
// drop tensor names of the body graph Parameter node that are no longer needed after the replacement
body_parameters[param_ind]->output(0).set_names({});
body_parameters[param_ind]->output(0).replace(ov_inputs[param_ind]);
if (auto ext_parameter = as_type_ptr<v0::Parameter>(ov_inputs[param_ind].get_node_shared_ptr())) {
// save type of a Parameter as converted in the body
// this is important if the external conversion extension is applied to body graph node
// with setting its own type
if (orig_type != element::dynamic) {
ext_parameter->set_element_type(orig_type);
}
}
}
for (const auto& result_node : ov_model_to_inject->get_results()) {
ov_outputs.push_back(result_node->input_value(0));
}
}
} // namespace tensorflow
} // namespace frontend
} // namespace ov
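
Both helpers above are now shared by the TF2 While translator and the TF1 fusing pass below. As a standalone illustration of what create_loop_for_tf_while produces, here is a hand-built ov::op::v5::Loop for while (i < 10) { i = i + 1 } with a scalar i32 counter; as in the code above, the condition is stitched onto the body results and also evaluated once before the first iteration (a sketch under these assumptions, not the frontend's code):

    #include <memory>
    #include "openvino/core/model.hpp"
    #include "openvino/op/add.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/less.hpp"
    #include "openvino/op/loop.hpp"
    #include "openvino/op/parameter.hpp"

    using namespace ov;

    int main() {
        // body: i_next = i + 1; the continuation condition is computed on the *updated* value
        auto i_param = std::make_shared<op::v0::Parameter>(element::i32, Shape{});
        auto one = op::v0::Constant::create(element::i32, Shape{}, {1});
        auto ten = op::v0::Constant::create(element::i32, Shape{}, {10});
        auto i_next = std::make_shared<op::v1::Add>(i_param, one);
        auto body_cond = std::make_shared<op::v1::Less>(i_next, ten);
        auto body = std::make_shared<Model>(OutputVector{i_next, body_cond}, ParameterVector{i_param});

        // "cond prior": evaluate the condition once before the first iteration
        auto init = std::make_shared<op::v0::Parameter>(element::i32, Shape{});
        auto exec_cond = std::make_shared<op::v1::Less>(init, ten);

        auto trip_count = op::v0::Constant::create(element::i32, Shape{}, {-1});  // no iteration limit
        auto loop = std::make_shared<op::v5::Loop>(trip_count, exec_cond);
        loop->set_function(body);
        loop->set_special_body_ports({-1, 1});          // no current-iteration input; body output 1 is the condition
        loop->set_merged_input(i_param, init, i_next);  // back edge: i <- i_next
        auto final_i = loop->get_iter_value(i_next);    // value of i after the last iteration
        loop->validate_and_infer_types();

        auto model = std::make_shared<Model>(OutputVector{final_i}, ParameterVector{init});
        return 0;
    }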

View File

@ -12,6 +12,7 @@
#include "openvino/core/type.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/frontend/node_context.hpp"
#include "openvino/op/loop.hpp"
#include "openvino/runtime/tensor.hpp"
#include "tensor.pb.h"
#include "tensor_shape.pb.h"
@ -102,6 +103,18 @@ bool propagate_conditional_flow(const ov::OutputVector& ov_inputs,
// copy existing markers from copy_from to copy_to marker
void copy_conditional_flow_marker(const CfMarkerType& copy_from, CfMarkerType& copy_to);
// create Loop operation corresponding to TensorFlow While operation
std::shared_ptr<ov::op::v5::Loop> create_loop_for_tf_while(const std::string& while_node_name,
const std::shared_ptr<ov::Model>& body_model,
const std::shared_ptr<ov::Model>& cond_model,
const ov::OutputVector& ov_inputs);
// inject a graph with the given inputs and return the outputs of the injected graph
void inject_body_model(std::shared_ptr<ov::Model> ov_model_to_inject,
const std::string& operation_type,
const ov::OutputVector& ov_inputs,
ov::OutputVector& ov_outputs);
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -4,6 +4,11 @@
#include "translate_session.hpp"
#include "helper_ops/enter.hpp"
#include "helper_ops/loop_cond.hpp"
#include "helper_ops/merge.hpp"
#include "helper_ops/next_iteration.hpp"
#include "helper_ops/switch.hpp"
#include "input_model.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/opsets/opset10.hpp"
@ -154,6 +159,148 @@ size_t get_flat_index_by_name_and_id(const ov::frontend::NamedOutputVector& outp
return idx;
}
}
// create a Parameter node that will produce the given tensor
std::shared_ptr<ov::opset8::Parameter> create_parameter_node_for_tensor(ov::Output<ov::Node> output_tensor) {
auto param =
std::make_shared<ov::opset8::Parameter>(output_tensor.get_element_type(), output_tensor.get_partial_shape());
param->output(0).set_names(output_tensor.get_names());
output_tensor.replace(param->output(0));
return param;
}
void fuse_loop_cond(std::shared_ptr<LoopCond>& loop_cond,
OpMap& ov_tensors_map,
const std::vector<std::shared_ptr<Enter>>& enter_ops) {
// ov_tensors_map maps an operation name to a vector of its output tensors
auto node_name = loop_cond->get_friendly_name();
// find key points for condition and body graphs
FRONT_END_GENERAL_CHECK(loop_cond, "[TensorFlow Frontend] internal error: pointer to LoopCond node is nullptr");
// extract condition and body graphs
// scan LoopCond node vicinity
// 1. LoopCond has just one output
// walk through all consuming inputs, which are expected to belong only to Switch nodes
std::vector<std::shared_ptr<Switch>> switch_nodes;
for (const auto& consuming_input : loop_cond->get_output_target_inputs(0)) {
auto switch_node = ov::as_type_ptr<Switch>(consuming_input.get_node()->shared_from_this());
FRONT_END_GENERAL_CHECK(switch_node,
"[TensorFlow Frontend] internal error or inconsistent model: consumer of LoopCond "
"output is not Switch operation");
switch_nodes.push_back(switch_node);
}
// collect all output tensors for Loop
// the created Loop node outputs will be connected with ov_outputs
size_t num_inputs = switch_nodes.size();
FRONT_END_GENERAL_CHECK(num_inputs > 0,
"[TensorFlow Frontend] internal error: LoopCond node has no output Switch nodes");
ov::OutputVector ov_outputs(num_inputs);
// collect ov_inputs (a list of Tensors) that will provide input data for the created Loop node
ov::OutputVector ov_inputs(num_inputs);
ov::ParameterVector cond_params(num_inputs);
ov::ParameterVector body_params(num_inputs);
ov::OutputVector ov_body_outputs(num_inputs);
std::vector<std::string> output_tensor_names(num_inputs);
std::set<std::shared_ptr<Enter>> met_enter_ops;
std::string frame_name;
for (size_t ind = 0; ind < num_inputs; ++ind) {
// Switch node has two outputs:
// 0 (output_false) - interrupt the loop, 1 (output_true) - continue the loop
// check if Exit node exists
auto switch_node = switch_nodes[ind];
FRONT_END_GENERAL_CHECK(
switch_node->get_output_target_inputs(0).size() < 2,
"[TensorFlow Frontend] internal error or inconsistent model: Switch node has more than one Exit nodes");
if (switch_node->get_output_target_inputs(0).size() == 1) {
auto exit_node = (*switch_node->get_output_target_inputs(0).begin()).get_node();
ov_outputs[ind] = exit_node->output(0);
output_tensor_names[ind] = exit_node->get_friendly_name() + ":0";
}
auto merge_node = ov::as_type_ptr<Merge>(switch_node->input_value(0).get_node_shared_ptr());
FRONT_END_GENERAL_CHECK(merge_node,
"[TensorFlow Frontend] internal error or inconsistent model: Data for Switch node is "
"not produced by Merge node for While operation");
// create Parameter node for condition graph
cond_params[ind] = create_parameter_node_for_tensor(merge_node->output(0));
body_params[ind] = create_parameter_node_for_tensor(switch_node->output(1));
// check that Merge node has Enter and NextIteration producers
auto enter = ov::as_type_ptr<Enter>(merge_node->input_value(0).get_node_shared_ptr());
auto next_iteration = ov::as_type_ptr<NextIteration>(merge_node->input_value(0).get_node_shared_ptr());
if (!enter) {
enter = ov::as_type_ptr<Enter>(merge_node->input_value(1).get_node_shared_ptr());
}
if (!next_iteration) {
next_iteration = ov::as_type_ptr<NextIteration>(merge_node->input_value(1).get_node_shared_ptr());
}
FRONT_END_GENERAL_CHECK(enter && next_iteration,
"[TensorFlow Frontend] internal error or inconsistent model: inputs of Merge node in "
"While sub-graph are not Enter and NextIteration");
ov_inputs[ind] = enter->input_value(0);
met_enter_ops.insert(enter);
frame_name = enter->get_frame_name();
// retrieve output tensor for body graph that is an input to NextIteration node
std::string producer_name;
size_t producer_output_port_idx;
next_iteration->get_producer(producer_name, producer_output_port_idx);
FRONT_END_GENERAL_CHECK(
ov_tensors_map.count(producer_name) > 0,
"[TensorFlow Frontend] internal error: NextIteration producer is not found in the tensor map");
auto producer_outputs = ov_tensors_map.at(producer_name);
FRONT_END_GENERAL_CHECK(
producer_output_port_idx < producer_outputs.size(),
"[TensorFlow Frontend] internal error: NextIteration producer has insufficient number of outputs");
auto ov_body_output = producer_outputs[producer_output_port_idx].port;
if (ov_body_output.get_node_shared_ptr() == switch_node) {
// this is the case when the NextIteration node is connected directly to the Switch node
ov_body_outputs[ind] = body_params[ind]->output(0);
} else {
ov_body_outputs[ind] = ov_body_output;
}
}
auto ov_cond_output = loop_cond->input_values();
// insert additional inputs for future Loop node
for (auto& enter : enter_ops) {
if (met_enter_ops.find(enter) == met_enter_ops.end() && enter->get_frame_name() == frame_name) {
ov_inputs.push_back(enter->input_value(0));
auto additional_param = create_parameter_node_for_tensor(enter->output(0));
cond_params.push_back(additional_param);
body_params.push_back(additional_param);
}
}
// create the condition and body graph models
auto cond_model = std::make_shared<ov::Model>(ov_cond_output, cond_params);
auto body_model = std::make_shared<ov::Model>(ov_body_outputs, body_params);
auto loop_node = create_loop_for_tf_while(node_name, body_model, cond_model, ov_inputs);
auto loop_model = std::make_shared<ov::Model>(loop_node->outputs());
size_t loop_node_output_size = loop_node->get_output_size();
FRONT_END_GENERAL_CHECK(loop_node_output_size == num_inputs,
"[TensorFlow Frontend] internal error: the created Loop node to replace TF1 While has "
"unexpected number of outputs");
for (size_t output_ind = 0; output_ind < loop_node_output_size; ++output_ind) {
auto producer_node = ov_outputs[output_ind].get_node_shared_ptr();
if (producer_node) {
std::string producer_name = producer_node->get_friendly_name();
size_t producer_output_port_idx = ov_outputs[output_ind].get_index();
// work only for non-empty ov::Output<ov::Node>
ov_outputs[output_ind].replace(loop_node->output(output_ind));
ov_outputs[output_ind].set_names({output_tensor_names[output_ind]});
if (ov_tensors_map.count(producer_name) &&
producer_output_port_idx < ov_tensors_map.at(producer_name).size()) {
ov_tensors_map.at(producer_name)[producer_output_port_idx] = ov_outputs[output_ind];
}
}
}
}
} // namespace
TranslateSession::TranslateSession(const ov::frontend::InputModel::Ptr& input_model,
@ -173,37 +320,12 @@ std::shared_ptr<ov::Model> TranslateSession::get_converted_model() {
return m_ov_model;
}
void TranslateSession::inject_body_model(std::shared_ptr<ov::Model> body_model,
const std::string& operation_type,
const ov::OutputVector& ov_inputs,
ov::OutputVector& ov_outputs) {
ov_outputs.clear();
auto body_parameters = body_model->get_parameters();
FRONT_END_GENERAL_CHECK(body_parameters.size() == ov_inputs.size(),
"[TensorFlow Error] Internal error or incorrect input models: number of "
"inputs and arguments to the function " +
operation_type + " do not match.");
for (size_t param_ind = 0; param_ind < body_parameters.size(); ++param_ind) {
auto orig_type = body_parameters[param_ind]->get_element_type();
body_parameters[param_ind]->output(0).replace(ov_inputs[param_ind]);
if (auto ext_parameter = as_type_ptr<ov::opset8::Parameter>(ov_inputs[param_ind].get_node_shared_ptr())) {
// save type of a Parameter as converted in the body
// this is important if the external conversion extension is applied to body graph node
// with setting its own type
if (orig_type != element::dynamic) {
ext_parameter->set_element_type(orig_type);
}
}
}
for (const auto& result_node : body_model->get_results()) {
ov_outputs.push_back(result_node->input_value(0));
}
}
void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& input_model,
std::shared_ptr<ov::Model>& ov_model) {
OpMap ng_op_map;
ControlDepsMap control_deps_map;
std::vector<std::shared_ptr<LoopCond>> loop_cond_ops;
std::vector<std::shared_ptr<Enter>> enter_ops;
ov::ParameterVector params;
ov::ResultVector results;
@ -375,6 +497,19 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
ov_outputs = named_from_indexed(fw_node->outputs());
}
// save LoopCond operations in topological order for further fusing
if (ov_outputs.size() == 1 && as_type_ptr<LoopCond>(ov_outputs[0].port.get_node_shared_ptr())) {
loop_cond_ops.push_back(as_type_ptr<LoopCond>(ov_outputs[0].port.get_node_shared_ptr()));
} else if (ov_outputs.size() == 1 && as_type_ptr<Enter>(ov_outputs[0].port.get_node_shared_ptr())) {
enter_ops.push_back(as_type_ptr<Enter>(ov_outputs[0].port.get_node_shared_ptr()));
} else if (ov_outputs.size() == 1 && as_type_ptr<NextIteration>(ov_outputs[0].port.get_node_shared_ptr())) {
std::string producer_name;
size_t producer_output_port_idx;
operation_place->get_next_iteration_back_edge(producer_name, producer_output_port_idx);
auto next_iteration = as_type_ptr<NextIteration>(ov_outputs[0].port.get_node_shared_ptr());
next_iteration->set_producer(producer_name, producer_output_port_idx);
}
// create input control dependencies set for the current operation node
std::set<ov::Output<ov::Node>> input_control_deps;
for (const auto& control_dep_name : control_dependencies_names) {
@ -526,6 +661,14 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
ov::ParameterVector ordered_params = reorder_ops_by_names(input_names, params);
ov::ResultVector ordered_results = reorder_ops_by_names(output_names, results);
// before adding Result nodes to terminal nodes,
// fuse TF1 control-flow-based While constructs into Loop operations
// this needs to be performed in reverse topological order
std::reverse(loop_cond_ops.begin(), loop_cond_ops.end());
for (auto& loop_cond_op : loop_cond_ops) {
fuse_loop_cond(loop_cond_op, ng_op_map, enter_ops);
}
ov_model = std::make_shared<ov::Model>(ordered_results, ordered_params, m_model_name);
}
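
For orientation, fuse_loop_cond above keys off the stable dataflow pattern that TF1 emits per loop variable. The diagram and the minimal vicinity walk below are an illustration only; they assume the frontend's internal helper_ops headers, which are available solely inside the TF FE build tree:

    #include <memory>
    #include "helper_ops/loop_cond.hpp"  // internal TF FE headers, available only
    #include "helper_ops/merge.hpp"      // inside the frontend's own build tree
    #include "helper_ops/switch.hpp"
    #include "openvino/core/type.hpp"

    using namespace ov::frontend::tensorflow;

    // Dataflow around one loop variable of a TF1 While, the pattern fuse_loop_cond matches:
    //
    //   initial value --> Enter --> Merge --> Switch --(port 0, false)--> Exit --> loop result
    //                                 ^          |
    //                                 |        (port 1, true)--> body ... --> NextIteration
    //                                 +--------------------------------------------+
    //
    // LoopCond, fed by the condition sub-graph, drives the predicate of every Switch.
    void walk_loop_cond_vicinity(const std::shared_ptr<LoopCond>& loop_cond) {
        for (const auto& consumer : loop_cond->get_output_target_inputs(0)) {
            auto switch_node = ov::as_type_ptr<Switch>(consumer.get_node()->shared_from_this());
            if (!switch_node)
                continue;  // the real pass reports an inconsistent model here
            // the data input of Switch must come from a Merge of Enter and NextIteration
            auto merge_node = ov::as_type_ptr<Merge>(switch_node->input_value(0).get_node_shared_ptr());
            (void)merge_node;  // cond Parameter <- Merge output; body Parameter <- Switch output 1
        }
    }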

View File

@ -42,11 +42,6 @@ public:
void translate_graph(const ov::frontend::InputModel::Ptr& input_model, std::shared_ptr<ov::Model>& ov_model);
void inject_body_model(std::shared_ptr<ov::Model> body_model,
const std::string& operation_type,
const ov::OutputVector& ov_inputs,
ov::OutputVector& ov_outputs);
std::shared_ptr<ov::Model> get_body_ov_model(const std::string& body_graph_name,
const ov::OutputVector& ov_inputs,
bool clear_names = true);

View File

@ -115,33 +115,6 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) {
ASSERT_NO_THROW(frontEnd->convert(model));
}
TEST(FrontEndConvertModelTest, test_unsupported_tf1_while) {
FrontEndManager fem;
FrontEnd::Ptr frontEnd;
InputModel::Ptr inputModel;
ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_FE));
ASSERT_NE(frontEnd, nullptr);
auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_MODELS_DIRNAME) +
string("model_tf1_while/model_tf1_while.pbtxt"));
ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename));
ASSERT_NE(inputModel, nullptr);
shared_ptr<ov::Model> model;
try {
model = frontEnd->convert(inputModel);
FAIL() << "TensorFlow 1 While is not supported in TF FE but conversion passed without errors. "
"OpConversionFailure is expected.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message = "[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, "
"LoopCond, Merge, NextIteration, Switch";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
ASSERT_EQ(model, nullptr);
} catch (...) {
FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason.";
}
}
TEST_F(FrontEndConversionWithReferenceTestsF, ModelWithDynamicType) {
{ model = convert_model_partially("dynamic_type_model/dynamic_type_model.pb"); }
{
@ -169,11 +142,12 @@ TEST(FrontEndConvertModelTest, test_unsupported_tf1_while_and_incorrect_less_tra
"OpConversionFailure is expected.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message = "Less expects ten inputs.\n"
"\n"
"[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, "
"LoopCond, Merge, NextIteration, Switch";
string ref_message = "Less expects ten inputs.\n";
string not_found_message =
"[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, "
"LoopCond, Merge, NextIteration, Switch";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
ASSERT_TRUE(error_message.find(not_found_message) == string::npos);
ASSERT_EQ(model, nullptr);
} catch (...) {
FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason.";
@ -191,14 +165,12 @@ TEST(FrontEndConvertModelTest, conversion_with_unknown_exception) {
"OpConversionFailure is expected.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message = "Unknown exception type\n"
"[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, "
"LoopCond, Merge, NextIteration, Switch";
string ref_message = "Unknown exception type\n";
string doc_message =
"To facilitate the conversion of unsupported operations, refer to Frontend Extension documentation: "
"https://docs.openvino.ai/latest/openvino_docs_Extensibility_UG_Frontend_Extensions.html";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
ASSERT_TRUE(error_message.find(doc_message) != string::npos);
ASSERT_TRUE(error_message.find(doc_message) == string::npos);
ASSERT_EQ(model, nullptr);
} catch (...) {
FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason.";

View File

@ -0,0 +1,47 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include "internal_operation.hpp"
#include "tf_utils.hpp"
namespace ov {
namespace frontend {
namespace tensorflow {
// Internal operation for Enter that marks an entry point for data going into a loop in the graph
// It is used along with the Exit operation
class Enter : public InternalOperation {
public:
OPENVINO_OP("Enter", "ov::frontend::tensorflow", InternalOperation);
Enter(const Output<Node>& data,
const std::string frame_name,
const std::shared_ptr<DecoderBase>& decoder = std::make_shared<DecoderFake>())
: InternalOperation(decoder, OutputVector{data}, 1, "Enter"),
m_frame_name(frame_name) {
validate_and_infer_types();
}
void validate_and_infer_types() override {
auto data_type = get_input_element_type(0);
auto data_shape = get_input_partial_shape(0);
set_output_type(0, data_type, data_shape);
}
std::string get_frame_name() const {
return m_frame_name;
}
private:
std::string m_frame_name;
};
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,37 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include "internal_operation.hpp"
#include "tf_utils.hpp"
namespace ov {
namespace frontend {
namespace tensorflow {
// Internal operation for Exit that marks an exit point for data leaving a loop in the graph
// It is used along with the Enter operation
class Exit : public InternalOperation {
public:
OPENVINO_OP("Exit", "ov::frontend::tensorflow", InternalOperation);
Exit(const Output<Node>& data, const std::shared_ptr<DecoderBase>& decoder = std::make_shared<DecoderFake>())
: InternalOperation(decoder, OutputVector{data}, 1, "Exit") {
validate_and_infer_types();
}
void validate_and_infer_types() override {
auto data_type = get_input_element_type(0);
auto data_shape = get_input_partial_shape(0);
set_output_type(0, data_type, data_shape);
}
};
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,37 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include "internal_operation.hpp"
#include "tf_utils.hpp"
namespace ov {
namespace frontend {
namespace tensorflow {
// Internal operation for LoopCond that represents the loop termination condition
// used by the pivot Switch nodes of a loop
class LoopCond : public InternalOperation {
public:
OPENVINO_OP("LoopCond", "ov::frontend::tensorflow", InternalOperation);
LoopCond(const Output<Node>& input, const std::shared_ptr<DecoderBase>& decoder = std::make_shared<DecoderFake>())
: InternalOperation(decoder, OutputVector{input}, 1, "LoopCond") {
validate_and_infer_types();
}
void validate_and_infer_types() override {
auto data_type = get_input_element_type(0);
auto data_shape = get_input_partial_shape(0);
set_output_type(0, data_type, data_shape);
}
};
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,54 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include "internal_operation.hpp"
#include "merge.hpp"
#include "tf_utils.hpp"
namespace ov {
namespace frontend {
namespace tensorflow {
// Internal operation for NextIteration that makes its input available to the next iteration;
// its output goes to a Merge node
class NextIteration : public InternalOperation {
public:
OPENVINO_OP("NextIteration", "ov::frontend::tensorflow", InternalOperation);
NextIteration(const std::shared_ptr<DecoderBase>& decoder = std::make_shared<DecoderFake>())
: InternalOperation(decoder, OutputVector{}, 1, "NextIteration"),
m_back_edge_set(false) {
validate_and_infer_types();
}
void validate_and_infer_types() override {
set_output_type(0, ov::element::dynamic, ov::PartialShape::dynamic());
}
void set_producer(const std::string& producer_name, size_t producer_output_port_idx) {
m_producer_name = producer_name;
m_producer_output_port_idx = producer_output_port_idx;
m_back_edge_set = true;
}
void get_producer(std::string& producer_name, size_t& producer_output_port_idx) const {
FRONT_END_GENERAL_CHECK(m_back_edge_set,
"[TensorFlow Frontend] internal error: back edge for NextIteration is not set");
producer_name = m_producer_name;
producer_output_port_idx = m_producer_output_port_idx;
}
private:
bool m_back_edge_set;
std::string m_producer_name;
size_t m_producer_output_port_idx;
};
} // namespace tensorflow
} // namespace frontend
} // namespace ov
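
For reference, the producer recorded on the OpPlace during loading (see the input_model.cpp hunk above) is copied onto this helper node by translate_graph, and fuse_loop_cond later reads it back to locate the body output feeding the next iteration. A minimal round trip through the API, assuming the internal header is on the include path and with an illustrative producer name:

    #include <memory>
    #include <string>
    #include "helper_ops/next_iteration.hpp"  // internal TF FE header

    void back_edge_round_trip() {
        auto next_iteration = std::make_shared<ov::frontend::tensorflow::NextIteration>();
        // recorded during loading by OpPlace::set_next_iteration_back_edge
        next_iteration->set_producer("while/add", 0);
        std::string producer_name;
        size_t producer_output_port_idx = 0;
        next_iteration->get_producer(producer_name, producer_output_port_idx);  // "while/add", 0
    }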

View File

@ -131,10 +131,22 @@ public:
Ptr get_target_tensor() const override;
Ptr get_target_tensor(int outputPortIndex) const override;
// set back edge for OpPlace of NextIteration operation
// this is needed since we break the cycle in the graph
void set_next_iteration_back_edge(const std::string& next_iteration_producer_name,
size_t next_iteration_producer_output_port_idx);
void get_next_iteration_back_edge(std::string& next_iteration_producer_name,
size_t& next_iteration_producer_output_port_idx) const;
private:
std::shared_ptr<DecoderBase> m_op_decoder;
std::map<std::string, std::vector<std::shared_ptr<InPortPlace>>> m_input_ports;
std::vector<std::shared_ptr<OutPortPlace>> m_output_ports;
// flag if back edge is set
bool m_back_edge_set;
std::string m_next_iteration_producer_name;
size_t m_next_iteration_producer_output_port_idx;
};
class TensorPlace : public Place {

View File

@ -35,7 +35,9 @@ public:
}
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const override {
return std::make_shared<FrameworkNode>(m_decoder, inputs, get_output_size());
auto fw_node = std::make_shared<FrameworkNode>(m_decoder, inputs, get_output_size());
fw_node->set_attrs(get_attrs());
return fw_node;
}
std::string get_op_type() const {

View File

@ -29,7 +29,23 @@ bool Place::is_output() const {
OpPlace::OpPlace(const ov::frontend::InputModel& input_model, std::shared_ptr<DecoderBase> op_decoder)
: Place(input_model, {op_decoder->get_op_name()}),
m_op_decoder(op_decoder) {}
m_op_decoder(op_decoder),
m_back_edge_set(false) {}
void OpPlace::set_next_iteration_back_edge(const std::string& next_iteration_producer_name,
size_t next_iteration_producer_output_port_idx) {
m_next_iteration_producer_name = next_iteration_producer_name;
m_next_iteration_producer_output_port_idx = next_iteration_producer_output_port_idx;
m_back_edge_set = true;
}
void OpPlace::get_next_iteration_back_edge(std::string& next_iteration_producer_name,
size_t& next_iteration_producer_output_port_idx) const {
FRONT_END_GENERAL_CHECK(m_back_edge_set,
"[TensorFlow Frontend] internal error: back edge for NextIteration is not set");
next_iteration_producer_name = m_next_iteration_producer_name;
next_iteration_producer_output_port_idx = m_next_iteration_producer_output_port_idx;
}
const std::vector<std::shared_ptr<OutPortPlace>>& OpPlace::get_output_ports() const {
return m_output_ports;

View File

@ -49,8 +49,7 @@ class TestWhile(CommonTFLayerTest):
test_data_basic = [
dict(y_shape=[2, 3], data_type=np.int32, lower_control_flow=False),
dict(y_shape=[2, 1, 4], data_type=np.int32, lower_control_flow=False),
pytest.param(dict(y_shape=[2, 1, 4], data_type=np.int32, lower_control_flow=True),
marks=pytest.mark.xfail(reason="105670"))
dict(y_shape=[2, 1, 4], data_type=np.int32, lower_control_flow=True)
]
@pytest.mark.parametrize("params", test_data_basic)
@ -108,8 +107,7 @@ class TestWhileShapeVariant(CommonTFLayerTest):
test_data_basic = [
dict(y_shape=[2, 3], lower_control_flow=False),
dict(y_shape=[2, 1, 4], lower_control_flow=False),
pytest.param(dict(y_shape=[2, 1, 4], lower_control_flow=True),
marks=pytest.mark.xfail(reason="105670"))
dict(y_shape=[2, 1, 4], lower_control_flow=True)
]
@pytest.mark.parametrize("params", test_data_basic)

View File

@ -312,9 +312,8 @@ def update_fallback_with_conversion_error(use_new_frontend: bool, is_tf: bool, e
conversion_error_re = r"^(\[TensorFlow\ Frontend\]\ Internal\ error\,\ no\ translator\ found\ for\ operation\(s\)\:\ )((\w+)(\,\ \w+)*)$"
conversion_error_match = re.findall(conversion_error_re, ex_msg, re.MULTILINE)
all_fallback_operations = [
# corresponds to TF1 While operation
# corresponds to TF1 TensorArray operations
"TensorArrayScatterV3", "TensorArrayV3", "TensorArraySizeV3", "TensorArrayGatherV3",
"LoopCond", "Enter", "NextIteration", "Exit",
# corresponds to operations with complex tensors
"FFT", "FFT2D", "FFT3D", "IFFT", "IFFT2D", "IFFT3D",
"RFFT", "RFFT2D", "RFFT3D", "IRFFT", "IRFFT2D", "IRFFT3D",

View File

@ -243,8 +243,7 @@ class TestMoFreezePlaceholderTFFE(unittest.TestCase):
def test_conversion_failure_fallback_use_new_frontend(self):
with self.assertRaisesRegex(Exception,
"\[TensorFlow Frontend\] Internal error, no translator found for operation\(s\)\: "
"Enter\, Exit\, LoopCond\, Merge\, NextIteration\, Switch\, TensorArrayGatherV3\, "
"TensorArraySizeV3\, TensorArrayV3"):
"TensorArrayGatherV3\, TensorArrayReadV3\, TensorArraySizeV3\, TensorArrayV3\, TensorArrayWriteV3"):
self.basic("ctc_model_based.pbtxt", None, None, None, None,
None, None, True, True, True, False)